CallMediaHandler -> MediaDeviceHandler
Signed-off-by: Šimon Brandner <simon.bra.ag@gmail.com>
This commit is contained in:
parent 759a4e796f
commit 9bceb40820
Deleted file (85 lines):

@@ -1,85 +0,0 @@
/*
Copyright 2017 Michael Telatynski <7t3chguy@gmail.com>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import SettingsStore from "./settings/SettingsStore";
import {SettingLevel} from "./settings/SettingLevel";
import {setMatrixCallAudioInput, setMatrixCallVideoInput} from "matrix-js-sdk/src/matrix";

export default {
    hasAnyLabeledDevices: async function() {
        const devices = await navigator.mediaDevices.enumerateDevices();
        return devices.some(d => !!d.label);
    },

    getDevices: function() {
        // Only needed for Electron atm, though should work in modern browsers
        // once permission has been granted to the webapp
        return navigator.mediaDevices.enumerateDevices().then(function(devices) {
            const audiooutput = [];
            const audioinput = [];
            const videoinput = [];

            devices.forEach((device) => {
                switch (device.kind) {
                    case 'audiooutput': audiooutput.push(device); break;
                    case 'audioinput': audioinput.push(device); break;
                    case 'videoinput': videoinput.push(device); break;
                }
            });

            // console.log("Loaded WebRTC Devices", mediaDevices);
            return {
                audiooutput,
                audioinput,
                videoinput,
            };
        }, (error) => { console.log('Unable to refresh WebRTC Devices: ', error); });
    },

    loadDevices: function() {
        const audioDeviceId = SettingsStore.getValue("webrtc_audioinput");
        const videoDeviceId = SettingsStore.getValue("webrtc_videoinput");

        setMatrixCallAudioInput(audioDeviceId);
        setMatrixCallVideoInput(videoDeviceId);
    },

    setAudioOutput: function(deviceId) {
        SettingsStore.setValue("webrtc_audiooutput", null, SettingLevel.DEVICE, deviceId);
    },

    setAudioInput: function(deviceId) {
        SettingsStore.setValue("webrtc_audioinput", null, SettingLevel.DEVICE, deviceId);
        setMatrixCallAudioInput(deviceId);
    },

    setVideoInput: function(deviceId) {
        SettingsStore.setValue("webrtc_videoinput", null, SettingLevel.DEVICE, deviceId);
        setMatrixCallVideoInput(deviceId);
    },

    getAudioOutput: function() {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audiooutput");
    },

    getAudioInput: function() {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audioinput");
    },

    getVideoInput: function() {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_videoinput");
    },
};

src/MediaDeviceHandler.ts (new file, 96 lines):

@@ -0,0 +1,96 @@
/*
Copyright 2017 Michael Telatynski <7t3chguy@gmail.com>
Copyright 2021 Šimon Brandner <simon.bra.ag@gmail.com>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import SettingsStore from "./settings/SettingsStore";
import { SettingLevel } from "./settings/SettingLevel";
import { setMatrixCallAudioInput, setMatrixCallVideoInput } from "matrix-js-sdk/src/matrix";

interface IMediaDevices {
    audioOutput: Array<MediaDeviceInfo>;
    audioInput: Array<MediaDeviceInfo>;
    videoInput: Array<MediaDeviceInfo>;
}

export default class MediaDeviceHandler {
    static async hasAnyLabeledDevices(): Promise<boolean> {
        const devices = await navigator.mediaDevices.enumerateDevices();
        return devices.some(d => Boolean(d.label));
    }

    static async getDevices(): Promise<IMediaDevices> {
        // Only needed for Electron atm, though should work in modern browsers
        // once permission has been granted to the webapp

        try {
            const devices = await navigator.mediaDevices.enumerateDevices();

            const audioOutput = [];
            const audioInput = [];
            const videoInput = [];

            devices.forEach((device) => {
                switch (device.kind) {
                    case 'audiooutput': audioOutput.push(device); break;
                    case 'audioinput': audioInput.push(device); break;
                    case 'videoinput': videoInput.push(device); break;
                }
            });

            return {
                audioOutput: audioOutput,
                audioInput: audioInput,
                videoInput: videoInput,
            };
        } catch (error) {
            console.log('Unable to refresh WebRTC Devices: ', error);
        }
    }

    static loadDevices() {
        const audioDeviceId = SettingsStore.getValue("webrtc_audioinput");
        const videoDeviceId = SettingsStore.getValue("webrtc_videoinput");

        setMatrixCallAudioInput(audioDeviceId);
        setMatrixCallVideoInput(videoDeviceId);
    }

    static setAudioOutput(deviceId: string) {
        SettingsStore.setValue("webrtc_audiooutput", null, SettingLevel.DEVICE, deviceId);
    }

    static setAudioInput(deviceId: string) {
        SettingsStore.setValue("webrtc_audioinput", null, SettingLevel.DEVICE, deviceId);
        setMatrixCallAudioInput(deviceId);
    }

    static setVideoInput(deviceId: string) {
        SettingsStore.setValue("webrtc_videoinput", null, SettingLevel.DEVICE, deviceId);
        setMatrixCallVideoInput(deviceId);
    }

    static getAudioOutput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audiooutput");
    }

    static getAudioInput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audioinput");
    }

    static getVideoInput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_videoinput");
    }
}
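
For orientation, a minimal usage sketch of the renamed class (not part of the commit; the helper name and import path are illustrative, while the MediaDeviceHandler methods are the ones defined above):

import MediaDeviceHandler from "./MediaDeviceHandler";

// Hypothetical caller: enumerate devices once labels are visible, then
// persist the first microphone. Note that the keys of the returned object
// are now camelCase (audioOutput / audioInput / videoInput) rather than the
// raw lowercase device kinds used by the old CallMediaHandler.
async function pickFirstMicrophone(): Promise<void> {
    if (!(await MediaDeviceHandler.hasAnyLabeledDevices())) return;

    // getDevices() resolves to undefined if enumeration fails, hence the optional chaining.
    const devices = await MediaDeviceHandler.getDevices();
    const firstMic = devices?.audioInput?.[0];
    if (firstMic) {
        // Saves the id at SettingLevel.DEVICE and points matrix-js-sdk's audio input at it.
        MediaDeviceHandler.setAudioInput(firstMic.deviceId);
    }
}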

@@ -22,7 +22,7 @@ import { MatrixClient } from 'matrix-js-sdk/src/client';
 import {Key} from '../../Keyboard';
 import PageTypes from '../../PageTypes';
-import CallMediaHandler from '../../CallMediaHandler';
+import MediaDeviceHandler from '../../MediaDeviceHandler';
 import { fixupColorFonts } from '../../utils/FontManager';
 import * as sdk from '../../index';
 import dis from '../../dispatcher/dispatcher';
@@ -167,7 +167,7 @@ class LoggedInView extends React.Component<IProps, IState> {
         // stash the MatrixClient in case we log out before we are unmounted
         this._matrixClient = this.props.matrixClient;

-        CallMediaHandler.loadDevices();
+        MediaDeviceHandler.loadDevices();

         fixupColorFonts();


@@ -30,7 +30,7 @@ import RecordingPlayback from "../voice_messages/RecordingPlayback";
 import {MsgType} from "matrix-js-sdk/src/@types/event";
 import Modal from "../../../Modal";
 import ErrorDialog from "../dialogs/ErrorDialog";
-import CallMediaHandler from "../../../CallMediaHandler";
+import MediaDeviceHandler from "../../../MediaDeviceHandler";

 interface IProps {
     room: Room;
@@ -129,8 +129,8 @@ export default class VoiceRecordComposerTile extends React.PureComponent<IProps,
         // Do a sanity test to ensure we're about to grab a valid microphone reference. Things might
         // change between this and recording, but at least we will have tried.
         try {
-            const devices = await CallMediaHandler.getDevices();
-            if (!devices?.['audioinput']?.length) {
+            const devices = await MediaDeviceHandler.getDevices();
+            if (!devices?.['audioInput']?.length) {
                 Modal.createTrackedDialog('No Microphone Error', '', ErrorDialog, {
                     title: _t("No microphone found"),
                     description: <>

@@ -18,7 +18,7 @@ limitations under the License.
 import React from 'react';
 import {_t} from "../../../../../languageHandler";
 import SdkConfig from "../../../../../SdkConfig";
-import CallMediaHandler from "../../../../../CallMediaHandler";
+import MediaDeviceHandler from "../../../../../MediaDeviceHandler";
 import Field from "../../../elements/Field";
 import AccessibleButton from "../../../elements/AccessibleButton";
 import {MatrixClientPeg} from "../../../../../MatrixClientPeg";
@@ -41,7 +41,7 @@ export default class VoiceUserSettingsTab extends React.Component {
     }

     async componentDidMount() {
-        const canSeeDeviceLabels = await CallMediaHandler.hasAnyLabeledDevices();
+        const canSeeDeviceLabels = await MediaDeviceHandler.hasAnyLabeledDevices();
         if (canSeeDeviceLabels) {
             this._refreshMediaDevices();
         }
@@ -49,10 +49,10 @@ export default class VoiceUserSettingsTab extends React.Component {

     _refreshMediaDevices = async (stream) => {
         this.setState({
-            mediaDevices: await CallMediaHandler.getDevices(),
-            activeAudioOutput: CallMediaHandler.getAudioOutput(),
-            activeAudioInput: CallMediaHandler.getAudioInput(),
-            activeVideoInput: CallMediaHandler.getVideoInput(),
+            mediaDevices: await MediaDeviceHandler.getDevices(),
+            activeAudioOutput: MediaDeviceHandler.getAudioOutput(),
+            activeAudioInput: MediaDeviceHandler.getAudioInput(),
+            activeVideoInput: MediaDeviceHandler.getVideoInput(),
         });
         if (stream) {
             // kill stream (after we've enumerated the devices, otherwise we'd get empty labels again)
@@ -100,21 +100,21 @@ export default class VoiceUserSettingsTab extends React.Component {
     };

     _setAudioOutput = (e) => {
-        CallMediaHandler.setAudioOutput(e.target.value);
+        MediaDeviceHandler.setAudioOutput(e.target.value);
         this.setState({
             activeAudioOutput: e.target.value,
         });
     };

     _setAudioInput = (e) => {
-        CallMediaHandler.setAudioInput(e.target.value);
+        MediaDeviceHandler.setAudioInput(e.target.value);
         this.setState({
             activeAudioInput: e.target.value,
         });
     };

     _setVideoInput = (e) => {
-        CallMediaHandler.setVideoInput(e.target.value);
+        MediaDeviceHandler.setVideoInput(e.target.value);
         this.setState({
             activeVideoInput: e.target.value,
         });
@@ -171,7 +171,7 @@ export default class VoiceUserSettingsTab extends React.Component {
             }
         };

-        const audioOutputs = this.state.mediaDevices.audiooutput.slice(0);
+        const audioOutputs = this.state.mediaDevices.audioOutput.slice(0);
         if (audioOutputs.length > 0) {
             const defaultDevice = getDefaultDevice(audioOutputs);
             speakerDropdown = (
@@ -183,7 +183,7 @@ export default class VoiceUserSettingsTab extends React.Component {
             );
         }

-        const audioInputs = this.state.mediaDevices.audioinput.slice(0);
+        const audioInputs = this.state.mediaDevices.audioInput.slice(0);
         if (audioInputs.length > 0) {
             const defaultDevice = getDefaultDevice(audioInputs);
             microphoneDropdown = (
@@ -195,7 +195,7 @@ export default class VoiceUserSettingsTab extends React.Component {
             );
         }

-        const videoInputs = this.state.mediaDevices.videoinput.slice(0);
+        const videoInputs = this.state.mediaDevices.videoInput.slice(0);
         if (videoInputs.length > 0) {
             const defaultDevice = getDefaultDevice(videoInputs);
             webcamDropdown = (

@@ -17,7 +17,7 @@ limitations under the License.
 import React, {createRef} from 'react';
 import { CallFeed, CallFeedEvent } from 'matrix-js-sdk/src/webrtc/callFeed';
 import { logger } from 'matrix-js-sdk/src/logger';
-import CallMediaHandler from "../../../CallMediaHandler";
+import MediaDeviceHandler from "../../../MediaDeviceHandler";

 interface IProps {
     feed: CallFeed,
@@ -38,7 +38,7 @@ export default class AudioFeed extends React.Component<IProps> {

     private playMedia() {
         const element = this.element.current;
-        const audioOutput = CallMediaHandler.getAudioOutput();
+        const audioOutput = MediaDeviceHandler.getAudioOutput();

         if (audioOutput) {
             try {

@@ -17,7 +17,7 @@ limitations under the License.
 import * as Recorder from 'opus-recorder';
 import encoderPath from 'opus-recorder/dist/encoderWorker.min.js';
 import {MatrixClient} from "matrix-js-sdk/src/client";
-import CallMediaHandler from "../CallMediaHandler";
+import MediaDeviceHandler from "../MediaDeviceHandler";
 import {SimpleObservable} from "matrix-widget-api";
 import {clamp, percentageOf, percentageWithin} from "../utils/numbers";
 import EventEmitter from "events";
@@ -97,7 +97,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
             audio: {
                 channelCount: CHANNELS,
                 noiseSuppression: true, // browsers ignore constraints they can't honour
-                deviceId: CallMediaHandler.getAudioInput(),
+                deviceId: MediaDeviceHandler.getAudioInput(),
             },
         });
         this.recorderContext = createAudioContext({