Merge pull request #6247 from SimonBrandner/feature/media-device-handler

Michael Telatynski authored on 2021-06-23 11:10:09 +01:00, committed by GitHub
commit 1fe1288427
7 changed files with 153 additions and 108 deletions


@@ -1,85 +0,0 @@
/*
Copyright 2017 Michael Telatynski <7t3chguy@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import SettingsStore from "./settings/SettingsStore";
import {SettingLevel} from "./settings/SettingLevel";
import {setMatrixCallAudioInput, setMatrixCallVideoInput} from "matrix-js-sdk/src/matrix";
export default {
hasAnyLabeledDevices: async function() {
const devices = await navigator.mediaDevices.enumerateDevices();
return devices.some(d => !!d.label);
},
getDevices: function() {
// Only needed for Electron atm, though should work in modern browsers
// once permission has been granted to the webapp
return navigator.mediaDevices.enumerateDevices().then(function(devices) {
const audiooutput = [];
const audioinput = [];
const videoinput = [];
devices.forEach((device) => {
switch (device.kind) {
case 'audiooutput': audiooutput.push(device); break;
case 'audioinput': audioinput.push(device); break;
case 'videoinput': videoinput.push(device); break;
}
});
// console.log("Loaded WebRTC Devices", mediaDevices);
return {
audiooutput,
audioinput,
videoinput,
};
}, (error) => { console.log('Unable to refresh WebRTC Devices: ', error); });
},
loadDevices: function() {
const audioDeviceId = SettingsStore.getValue("webrtc_audioinput");
const videoDeviceId = SettingsStore.getValue("webrtc_videoinput");
setMatrixCallAudioInput(audioDeviceId);
setMatrixCallVideoInput(videoDeviceId);
},
setAudioOutput: function(deviceId) {
SettingsStore.setValue("webrtc_audiooutput", null, SettingLevel.DEVICE, deviceId);
},
setAudioInput: function(deviceId) {
SettingsStore.setValue("webrtc_audioinput", null, SettingLevel.DEVICE, deviceId);
setMatrixCallAudioInput(deviceId);
},
setVideoInput: function(deviceId) {
SettingsStore.setValue("webrtc_videoinput", null, SettingLevel.DEVICE, deviceId);
setMatrixCallVideoInput(deviceId);
},
getAudioOutput: function() {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audiooutput");
},
getAudioInput: function() {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audioinput");
},
getVideoInput: function() {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_videoinput");
},
};
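
For orientation, this is how call sites used the module being deleted above: plain functions on a default-exported object, with getDevices() resolving to all-lowercase keys. A minimal, purely illustrative sketch (the import path here is an assumption; the real call sites later in this diff use paths like '../../CallMediaHandler'):

```typescript
import CallMediaHandler from "./CallMediaHandler"; // path illustrative

async function checkOldApi(): Promise<void> {
    // The old module exposed free functions; there was no instance and no events.
    CallMediaHandler.loadDevices();
    const devices = await CallMediaHandler.getDevices();
    // Note the lowercase keys; the replacement below renames them to camelCase.
    console.log(devices?.audioinput?.length ?? 0, "microphones found");
}
```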

src/MediaDeviceHandler.ts (new file, 120 lines)

@@ -0,0 +1,120 @@
/*
Copyright 2017 Michael Telatynski <7t3chguy@gmail.com>
Copyright 2021 Šimon Brandner <simon.bra.ag@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import SettingsStore from "./settings/SettingsStore";
import { SettingLevel } from "./settings/SettingLevel";
import { setMatrixCallAudioInput, setMatrixCallVideoInput } from "matrix-js-sdk/src/matrix";
import EventEmitter from 'events';
interface IMediaDevices {
audioOutput: Array<MediaDeviceInfo>;
audioInput: Array<MediaDeviceInfo>;
videoInput: Array<MediaDeviceInfo>;
}
export enum MediaDeviceHandlerEvent {
AudioOutputChanged = "audio_output_changed",
}
export default class MediaDeviceHandler extends EventEmitter {
private static internalInstance;
public static get instance(): MediaDeviceHandler {
if (!MediaDeviceHandler.internalInstance) {
MediaDeviceHandler.internalInstance = new MediaDeviceHandler();
}
return MediaDeviceHandler.internalInstance;
}
public static async hasAnyLabeledDevices(): Promise<boolean> {
const devices = await navigator.mediaDevices.enumerateDevices();
return devices.some(d => Boolean(d.label));
}
public static async getDevices(): Promise<IMediaDevices> {
// Only needed for Electron atm, though should work in modern browsers
// once permission has been granted to the webapp
try {
const devices = await navigator.mediaDevices.enumerateDevices();
const audioOutput = [];
const audioInput = [];
const videoInput = [];
devices.forEach((device) => {
switch (device.kind) {
case 'audiooutput': audioOutput.push(device); break;
case 'audioinput': audioInput.push(device); break;
case 'videoinput': videoInput.push(device); break;
}
});
return { audioOutput, audioInput, videoInput };
} catch (error) {
console.warn('Unable to refresh WebRTC Devices: ', error);
}
}
/**
* Retrieves devices from the SettingsStore and tells the js-sdk to use them
*/
public static loadDevices(): void {
const audioDeviceId = SettingsStore.getValue("webrtc_audioinput");
const videoDeviceId = SettingsStore.getValue("webrtc_videoinput");
setMatrixCallAudioInput(audioDeviceId);
setMatrixCallVideoInput(videoDeviceId);
}
public setAudioOutput(deviceId: string): void {
SettingsStore.setValue("webrtc_audiooutput", null, SettingLevel.DEVICE, deviceId);
this.emit(MediaDeviceHandlerEvent.AudioOutputChanged, deviceId);
}
/**
* This will not change the device that a potential call uses. The call will
* need to be ended and started again for this change to take effect
* @param {string} deviceId
*/
public setAudioInput(deviceId: string): void {
SettingsStore.setValue("webrtc_audioinput", null, SettingLevel.DEVICE, deviceId);
setMatrixCallAudioInput(deviceId);
}
/**
* This will not change the device that a potential call uses. The call will
* need to be ended and started again for this change to take effect
* @param {string} deviceId
*/
public setVideoInput(deviceId: string): void {
SettingsStore.setValue("webrtc_videoinput", null, SettingLevel.DEVICE, deviceId);
setMatrixCallVideoInput(deviceId);
}
public static getAudioOutput(): string {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audiooutput");
}
public static getAudioInput(): string {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audioinput");
}
public static getVideoInput(): string {
return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_videoinput");
}
}
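
A minimal sketch of how consumers are expected to use the new handler, based only on the file above; the wrapper functions and import path are illustrative, not part of the PR:

```typescript
import MediaDeviceHandler, { MediaDeviceHandlerEvent } from "./MediaDeviceHandler"; // path illustrative

async function logMicrophones(): Promise<void> {
    // Static reads keep the old call-site shape, but the result keys are now camelCase.
    const devices = await MediaDeviceHandler.getDevices();
    devices?.audioInput.forEach((d) => console.log(d.deviceId, d.label));
}

function switchSpeaker(deviceId: string): void {
    // Setters that other components need to react to live on the singleton,
    // so they can emit events via the EventEmitter base class.
    MediaDeviceHandler.instance.setAudioOutput(deviceId);
}

// e.g. anything that owns an <audio> element can re-route its output when the setting changes
MediaDeviceHandler.instance.on(MediaDeviceHandlerEvent.AudioOutputChanged, (deviceId: string) => {
    console.log("audio output changed to", deviceId);
});
```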


@@ -22,7 +22,7 @@ import { MatrixClient } from 'matrix-js-sdk/src/client';
import {Key} from '../../Keyboard';
import PageTypes from '../../PageTypes';
- import CallMediaHandler from '../../CallMediaHandler';
+ import MediaDeviceHandler from '../../MediaDeviceHandler';
import { fixupColorFonts } from '../../utils/FontManager';
import * as sdk from '../../index';
import dis from '../../dispatcher/dispatcher';
@@ -167,7 +167,7 @@ class LoggedInView extends React.Component<IProps, IState> {
// stash the MatrixClient in case we log out before we are unmounted
this._matrixClient = this.props.matrixClient;
- CallMediaHandler.loadDevices();
+ MediaDeviceHandler.loadDevices();
fixupColorFonts();


@@ -30,7 +30,7 @@ import RecordingPlayback from "../voice_messages/RecordingPlayback";
import {MsgType} from "matrix-js-sdk/src/@types/event";
import Modal from "../../../Modal";
import ErrorDialog from "../dialogs/ErrorDialog";
- import CallMediaHandler from "../../../CallMediaHandler";
+ import MediaDeviceHandler from "../../../MediaDeviceHandler";
interface IProps {
room: Room;
@@ -129,8 +129,8 @@ export default class VoiceRecordComposerTile extends React.PureComponent<IProps,
// Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
// change between this and recording, but at least we will have tried.
try {
- const devices = await CallMediaHandler.getDevices();
- if (!devices?.['audioinput']?.length) {
+ const devices = await MediaDeviceHandler.getDevices();
+ if (!devices?.['audioInput']?.length) {
Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
title: _t("No microphone found"),
description: <>
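
Worth noting from the hunk above: the device map keys change casing with the new handler, which is why the lookup moves from 'audioinput' to 'audioInput'. A hedged sketch of the same microphone sanity check in isolation (the helper name is illustrative):

```typescript
import MediaDeviceHandler from "../../../MediaDeviceHandler";

async function hasUsableMicrophone(): Promise<boolean> {
    // getDevices() can resolve to undefined if enumeration fails, hence the optional chaining.
    const devices = await MediaDeviceHandler.getDevices();
    return Boolean(devices?.audioInput?.length);
}
```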


@@ -18,7 +18,7 @@ limitations under the License.
import React from 'react';
import {_t} from "../../../../../languageHandler";
import SdkConfig from "../../../../../SdkConfig";
- import CallMediaHandler from "../../../../../CallMediaHandler";
+ import MediaDeviceHandler from "../../../../../MediaDeviceHandler";
import Field from "../../../elements/Field";
import AccessibleButton from "../../../elements/AccessibleButton";
import {MatrixClientPeg} from "../../../../../MatrixClientPeg";
@@ -41,7 +41,7 @@ export default class VoiceUserSettingsTab extends React.Component {
}
async componentDidMount() {
- const canSeeDeviceLabels = await CallMediaHandler.hasAnyLabeledDevices();
+ const canSeeDeviceLabels = await MediaDeviceHandler.hasAnyLabeledDevices();
if (canSeeDeviceLabels) {
this._refreshMediaDevices();
}
@@ -49,10 +49,10 @@
_refreshMediaDevices = async (stream) => {
this.setState({
- mediaDevices: await CallMediaHandler.getDevices(),
- activeAudioOutput: CallMediaHandler.getAudioOutput(),
- activeAudioInput: CallMediaHandler.getAudioInput(),
- activeVideoInput: CallMediaHandler.getVideoInput(),
+ mediaDevices: await MediaDeviceHandler.getDevices(),
+ activeAudioOutput: MediaDeviceHandler.getAudioOutput(),
+ activeAudioInput: MediaDeviceHandler.getAudioInput(),
+ activeVideoInput: MediaDeviceHandler.getVideoInput(),
});
if (stream) {
// kill stream (after we've enumerated the devices, otherwise we'd get empty labels again)
@@ -100,21 +100,21 @@
};
_setAudioOutput = (e) => {
- CallMediaHandler.setAudioOutput(e.target.value);
+ MediaDeviceHandler.instance.setAudioOutput(e.target.value);
this.setState({
activeAudioOutput: e.target.value,
});
};
_setAudioInput = (e) => {
- CallMediaHandler.setAudioInput(e.target.value);
+ MediaDeviceHandler.instance.setAudioInput(e.target.value);
this.setState({
activeAudioInput: e.target.value,
});
};
_setVideoInput = (e) => {
- CallMediaHandler.setVideoInput(e.target.value);
+ MediaDeviceHandler.instance.setVideoInput(e.target.value);
this.setState({
activeVideoInput: e.target.value,
});
@@ -171,7 +171,7 @@
}
};
- const audioOutputs = this.state.mediaDevices.audiooutput.slice(0);
+ const audioOutputs = this.state.mediaDevices.audioOutput.slice(0);
if (audioOutputs.length > 0) {
const defaultDevice = getDefaultDevice(audioOutputs);
speakerDropdown = (
@@ -183,7 +183,7 @@
);
}
- const audioInputs = this.state.mediaDevices.audioinput.slice(0);
+ const audioInputs = this.state.mediaDevices.audioInput.slice(0);
if (audioInputs.length > 0) {
const defaultDevice = getDefaultDevice(audioInputs);
microphoneDropdown = (
@@ -195,7 +195,7 @@
);
}
- const videoInputs = this.state.mediaDevices.videoinput.slice(0);
+ const videoInputs = this.state.mediaDevices.videoInput.slice(0);
if (videoInputs.length > 0) {
const defaultDevice = getDefaultDevice(videoInputs);
webcamDropdown = (


@@ -17,7 +17,7 @@ limitations under the License.
import React, {createRef} from 'react';
import { CallFeed, CallFeedEvent } from 'matrix-js-sdk/src/webrtc/callFeed';
import { logger } from 'matrix-js-sdk/src/logger';
- import CallMediaHandler from "../../../CallMediaHandler";
+ import MediaDeviceHandler, { MediaDeviceHandlerEvent } from "../../../MediaDeviceHandler";
interface IProps {
feed: CallFeed,
@@ -27,19 +27,25 @@ export default class AudioFeed extends React.Component<IProps> {
private element = createRef<HTMLAudioElement>();
componentDidMount() {
+ MediaDeviceHandler.instance.addListener(
+ MediaDeviceHandlerEvent.AudioOutputChanged,
+ this.onAudioOutputChanged,
+ );
this.props.feed.addListener(CallFeedEvent.NewStream, this.onNewStream);
this.playMedia();
}
componentWillUnmount() {
+ MediaDeviceHandler.instance.removeListener(
+ MediaDeviceHandlerEvent.AudioOutputChanged,
+ this.onAudioOutputChanged,
+ );
this.props.feed.removeListener(CallFeedEvent.NewStream, this.onNewStream);
this.stopMedia();
}
- private playMedia() {
+ private onAudioOutputChanged = (audioOutput: string) => {
const element = this.element.current;
- const audioOutput = CallMediaHandler.getAudioOutput();
if (audioOutput) {
try {
// This seems quite unreliable in Chrome, although I haven't yet managed to make a jsfiddle where
@@ -52,7 +58,11 @@ export default class AudioFeed extends React.Component<IProps> {
logger.warn("Couldn't set requested audio output device: using default", e);
}
}
+ }
+ private playMedia() {
+ const element = this.element.current;
+ this.onAudioOutputChanged(MediaDeviceHandler.getAudioOutput());
element.muted = false;
element.srcObject = this.props.feed.stream;
element.autoplay = true;
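
The lines elided between the two AudioFeed hunks are the ones that actually route audio to the chosen output device; they are not shown here, but the surrounding try/catch and the "Couldn't set requested audio output device" warning suggest the standard HTMLMediaElement.setSinkId call. A purely illustrative sketch of that idea, not the PR's exact code (setSinkId is not available in every browser, hence the feature check and the catch):

```typescript
async function routeAudioTo(element: HTMLAudioElement, deviceId: string): Promise<void> {
    // setSinkId is missing from some browsers and older TS DOM typings, so feature-detect it.
    const sinkable = element as HTMLAudioElement & { setSinkId?: (id: string) => Promise<void> };
    if (deviceId && sinkable.setSinkId) {
        try {
            await sinkable.setSinkId(deviceId);
        } catch (e) {
            console.warn("Couldn't set requested audio output device: using default", e);
        }
    }
}
```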


@@ -17,7 +17,7 @@ limitations under the License.
import * as Recorder from 'opus-recorder';
import encoderPath from 'opus-recorder/dist/encoderWorker.min.js';
import {MatrixClient} from "matrix-js-sdk/src/client";
- import CallMediaHandler from "../CallMediaHandler";
+ import MediaDeviceHandler from "../MediaDeviceHandler";
import {SimpleObservable} from "matrix-widget-api";
import {clamp, percentageOf, percentageWithin} from "../utils/numbers";
import EventEmitter from "events";
@@ -97,7 +97,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
audio: {
channelCount: CHANNELS,
noiseSuppression: true, // browsers ignore constraints they can't honour
- deviceId: CallMediaHandler.getAudioInput(),
+ deviceId: MediaDeviceHandler.getAudioInput(),
},
});
this.recorderContext = createAudioContext({