// bigbluebutton-html5/imports/ui/services/audio-manager/index.js

import Auth from '/imports/ui/services/auth';
import SIPBridge from '/imports/api/audio/client/bridge/sip';
import SFUAudioBridge from '/imports/api/audio/client/bridge/sfu-audio-bridge';
import logger from '/imports/startup/client/logger';
import { notify } from '/imports/ui/services/notification';
import playAndRetry from '/imports/utils/mediaElementPlayRetry';
import { monitorAudioConnection } from '/imports/utils/stats';
import browserInfo from '/imports/utils/browserInfo';
import getFromMeetingSettings from '/imports/ui/services/meeting-settings';
import getFromUserSettings from '/imports/ui/services/users-settings';
import {
DEFAULT_INPUT_DEVICE_ID,
reloadAudioElement,
getCurrentAudioSinkId,
getStoredAudioInputDeviceId,
storeAudioInputDeviceId,
getStoredAudioOutputDeviceId,
storeAudioOutputDeviceId,
} from '/imports/api/audio/client/bridge/service';
import MediaStreamUtils from '/imports/utils/media-stream-utils';
import { makeVar } from '@apollo/client';
import AudioErrors from '/imports/ui/services/audio-manager/error-codes';
import Session from '/imports/ui/services/storage/in-memory';
import GraphqlSubscriptionStore, { stringToHash } from '/imports/ui/core/singletons/subscriptionStore';
import VOICE_ACTIVITY from '../../core/graphql/queries/whoIsTalking';
const DEFAULT_AUDIO_BRIDGES_PATH = '/imports/api/audio/client/';
const CALL_STATES = {
STARTED: 'started',
ENDED: 'ended',
FAILED: 'failed',
RECONNECTING: 'reconnecting',
AUTOPLAY_BLOCKED: 'autoplayBlocked',
};
const BREAKOUT_AUDIO_TRANSFER_STATES = {
CONNECTED: 'connected',
DISCONNECTED: 'disconnected',
RETURNING: 'returning',
};
/**
* Audio stats types to be filtered in getStats()
*/
const FILTER_AUDIO_STATS = [
'outbound-rtp',
'inbound-rtp',
'candidate-pair',
'local-candidate',
'transport',
];
class AudioManager {
constructor() {
this._breakoutAudioTransferStatus = {
status: BREAKOUT_AUDIO_TRANSFER_STATES.DISCONNECTED,
breakoutMeetingId: null,
};
this.defineProperties({
isMuted: makeVar(true),
isConnected: makeVar(false),
isConnecting: makeVar(false),
isHangingUp: makeVar(false),
isListenOnly: makeVar(false),
isEchoTest: makeVar(false),
isTalking: makeVar(false),
isWaitingPermissions: makeVar(false),
error: makeVar(null),
autoplayBlocked: makeVar(false),
isReconnecting: makeVar(false),
bypassGUM: makeVar(false),
permissionStatus: makeVar(null),
transparentListenOnlySupported: makeVar(false),
});
this.failedMediaElements = [];
this.handlePlayElementFailed = this.handlePlayElementFailed.bind(this);
this.monitor = this.monitor.bind(this);
this.isUsingAudio = this.isUsingAudio.bind(this);
this._inputStream = makeVar(null);
this._inputDeviceId = {
value: makeVar(null),
};
this._outputDeviceId = {
value: makeVar(null),
};
this.BREAKOUT_AUDIO_TRANSFER_STATES = BREAKOUT_AUDIO_TRANSFER_STATES;
this._voiceActivityObserver = null;
window.addEventListener('StopAudioTracks', () => this.forceExitAudio());
}
_trackPermissionStatus() {
const handleTrackingError = (error) => {
logger.warn({
logCode: 'audiomanager_permission_tracking_failed',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
},
}, `Failed to track microphone permission status: ${error.message}`);
};
if (navigator?.permissions?.query) {
navigator.permissions.query({ name: 'microphone' })
.then((status) => {
// eslint-disable-next-line no-param-reassign
status.onchange = () => {
logger.debug({
logCode: 'audiomanager_permission_status_changed',
extraInfo: {
newStatus: status.state,
},
}, `Microphone permission status changed: ${status.state}`);
this.permissionStatus = status.state;
};
this.permissionStatus = status.state;
}).catch(handleTrackingError);
} else {
handleTrackingError(new Error('navigator.permissions.query is not available'));
}
}
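// Note: browsers that do not recognize 'microphone' as a PermissionName
// (historically Firefox) make query() reject, which lands in
// handleTrackingError above rather than throwing synchronously.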
_applyCachedOutputDeviceId() {
const cachedId = getStoredAudioOutputDeviceId();
if (typeof cachedId === 'string') {
this.changeOutputDevice(cachedId, false)
.then(() => {
this.outputDeviceId = cachedId;
})
.catch((error) => {
logger.warn(
{
logCode: 'audiomanager_output_device_storage_failed',
extraInfo: {
deviceId: cachedId,
errorMessage: error.message,
},
},
`Failed to apply output audio device from storage: ${error.message}`
);
});
}
}
set inputDeviceId(value) {
if (this._inputDeviceId.value() !== value) {
this._inputDeviceId.value(value);
}
if (this.fullAudioBridge) {
this.fullAudioBridge.inputDeviceId = this._inputDeviceId.value();
}
}
// inputDeviceId is a string that represents a MediaDeviceInfo.deviceId OR the static
// 'listen-only' string that represents our "virtual" listen-only device,
// i.e.: the user has a bidirectional audio channel, but has not assigned any
// input device to it.
get inputDeviceId() {
return this._inputDeviceId.value();
}
set outputDeviceId(value) {
if (this._outputDeviceId.value() !== value) {
this._outputDeviceId.value(value);
}
if (this.fullAudioBridge) {
this.fullAudioBridge.outputDeviceId = this._outputDeviceId.value();
}
if (this.listenOnlyBridge) {
this.listenOnlyBridge.outputDeviceId = this._outputDeviceId.value();
}
}
get outputDeviceId() {
return this._outputDeviceId.value();
}
shouldBypassGUM() {
return this.supportsTransparentListenOnly() && this.inputDeviceId === 'listen-only';
}
supportsTransparentListenOnly() {
return this.listenOnlyBridge?.supportsTransparentListenOnly()
&& this.fullAudioBridge?.supportsTransparentListenOnly();
}
observeVoiceActivity() {
// Observe voice activity changes to update any relevant *local* states
// (see onVoiceUserChanges)
if (!this._voiceActivityObserver) {
const subHash = stringToHash(JSON.stringify({
subscription: VOICE_ACTIVITY,
}));
this._voiceActivityObserver = GraphqlSubscriptionStore.makeSubscription(VOICE_ACTIVITY);
window.addEventListener('graphqlSubscription', (e) => {
const { subscriptionHash, response } = e.detail;
if (subscriptionHash === subHash) {
if (response) {
const { data } = response;
const voiceUser = data.user_voice_activity_stream.find((v) => v.userId === Auth.userID);
this.onVoiceUserChanges(voiceUser);
}
}
});
}
}
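// For reference, the 'graphqlSubscription' CustomEvent detail consumed above
// is shaped roughly like (field names per the handler and onVoiceUserChanges
// below):
//   { subscriptionHash, response: { data: { user_voice_activity_stream:
//       [{ userId, muted, talking }, ...] } } }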
init(userData, audioEventHandler) {
this.userData = userData;
this.inputDeviceId = getStoredAudioInputDeviceId() || DEFAULT_INPUT_DEVICE_ID;
this.outputDeviceId = getCurrentAudioSinkId();
this._applyCachedOutputDeviceId();
this._trackPermissionStatus();
this.loadBridges(userData);
this.transparentListenOnlySupported = this.supportsTransparentListenOnly();
this.audioEventHandler = audioEventHandler;
this.initialized = true;
}
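// Usage sketch (userData fields omitted; event payload per onAudioJoin below;
// handleAudioStarted is a hypothetical consumer callback):
//   audioManager.init(userData, (event) => {
//     if (event.name === 'started') handleAudioStarted(event.isListenOnly);
//   });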
/**
* Load the audio bridge modules to be used by the manager.
*
* Bridges can be configured in the settings.yml file.
* @param {Object} userData The object representing user data to be passed to
* the bridge.
*/
loadBridges(userData) {
let FullAudioBridge = SIPBridge;
let ListenOnlyBridge = SFUAudioBridge;
const MEDIA = window.meetingClientSettings.public.media;
if (MEDIA.audio) {
const { defaultFullAudioBridge, defaultListenOnlyBridge } = MEDIA.audio;
const _fullAudioBridge = getFromUserSettings(
'bbb_fullaudio_bridge',
getFromMeetingSettings('fullaudio-bridge', defaultFullAudioBridge),
);
this.bridges = {
[_fullAudioBridge]: SIPBridge,
[defaultListenOnlyBridge]: SFUAudioBridge,
};
if (_fullAudioBridge && this.bridges[_fullAudioBridge]) {
FullAudioBridge = this.bridges[_fullAudioBridge];
}
if (defaultListenOnlyBridge && this.bridges[defaultListenOnlyBridge]) {
ListenOnlyBridge = this.bridges[defaultListenOnlyBridge];
}
}
this.fullAudioBridge = new FullAudioBridge(userData);
this.listenOnlyBridge = new ListenOnlyBridge(userData);
// Initialize device IDs in configured bridges
this.fullAudioBridge.inputDeviceId = this.inputDeviceId;
this.fullAudioBridge.outputDeviceId = this.outputDeviceId;
this.listenOnlyBridge.outputDeviceId = this.outputDeviceId;
}
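// settings.yml sketch for the selection logic above (values illustrative):
//   public:
//     media:
//       audio:
//         defaultFullAudioBridge: 'sipjs'
//         defaultListenOnlyBridge: 'fullaudio'
// A per-user override may come in via the bbb_fullaudio_bridge user setting
// or the fullaudio-bridge meeting setting, as read above.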
setAudioMessages(messages, intl) {
this.messages = messages;
this.intl = intl;
}
defineProperties(obj) {
Object.keys(obj).forEach((key) => {
const privateKey = `_${key}`;
this[privateKey] = {
value: obj[key],
};
Object.defineProperty(this, key, {
set: (value) => {
this[privateKey].value(value);
},
get: () => this[privateKey].value(),
});
// Also expose the underlying reactive var for consumers that need the
// reference itself (property descriptors cannot carry extra methods).
this[`getReference${key}`] = () => this[privateKey];
});
}
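// e.g. defineProperties({ isMuted: makeVar(true) }) produces a this._isMuted
// holder plus a this.isMuted accessor, so plain property reads/writes go
// through the Apollo reactive var:
//   this.isMuted;         // -> this._isMuted.value()
//   this.isMuted = false; // -> this._isMuted.value(false)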
async trickleIce() {
const { isFirefox, isIe, isSafari } = browserInfo;
if (
!this.listenOnlyBridge ||
typeof this.listenOnlyBridge.trickleIce !== 'function' ||
isFirefox ||
isIe ||
isSafari
) {
return [];
}
if (this.validIceCandidates && this.validIceCandidates.length) {
logger.info(
{ logCode: 'audiomanager_trickle_ice_reuse_candidate' },
'Reusing trickle ICE information before activating microphone'
);
return this.validIceCandidates;
}
logger.info(
{ logCode: 'audiomanager_trickle_ice_get_local_candidate' },
'Performing trickle ICE before activating microphone'
);
try {
this.validIceCandidates = await this.listenOnlyBridge.trickleIce();
return this.validIceCandidates;
} catch (error) {
logger.error(
{
logCode: 'audiomanager_trickle_ice_failed',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
},
},
`Trickle ICE before activating microphone failed: ${error.message}`
);
return [];
}
}
joinMicrophone() {
this.audioJoinStartTime = new Date();
this.logAudioJoinTime = false;
this.isListenOnly = false;
this.isEchoTest = false;
return this.onAudioJoining
.bind(this)()
.then(() => {
const callOptions = {
isListenOnly: false,
extension: null,
inputStream: this.inputStream,
bypassGUM: this.shouldBypassGUM(),
};
return this.joinAudio(callOptions, this.callStateCallback.bind(this));
});
}
joinEchoTest() {
this.audioJoinStartTime = new Date();
this.logAudioJoinTime = false;
this.isListenOnly = false;
this.isEchoTest = true;
const MEDIA = window.meetingClientSettings.public.media;
const ECHO_TEST_NUMBER = MEDIA.echoTestNumber;
const EXPERIMENTAL_USE_KMS_TRICKLE_ICE_FOR_MICROPHONE =
window.meetingClientSettings.public.app.experimentalUseKmsTrickleIceForMicrophone;
return this.onAudioJoining
.bind(this)()
.then(async () => {
let validIceCandidates = [];
if (EXPERIMENTAL_USE_KMS_TRICKLE_ICE_FOR_MICROPHONE) {
validIceCandidates = await this.trickleIce();
}
const callOptions = {
isListenOnly: false,
extension: ECHO_TEST_NUMBER,
inputStream: this.inputStream,
validIceCandidates,
bypassGUM: this.shouldBypassGUM(),
};
logger.info(
{
logCode: 'audiomanager_join_echotest',
extraInfo: { logType: 'user_action' },
},
'User requested to join audio conference with mic'
);
return this.joinAudio(callOptions, this.callStateCallback.bind(this));
});
}
joinAudio(callOptions, callStateCallback) {
return this.bridge
.joinAudio(callOptions, callStateCallback.bind(this))
.catch((error) => {
const { name, message } = error;
const errorPayload = {
type: 'MEDIA_ERROR',
errMessage: message || 'MEDIA_ERROR',
errCode: AudioErrors.MIC_ERROR.UNKNOWN,
};
switch (name) {
case 'NotAllowedError':
errorPayload.errCode = AudioErrors.MIC_ERROR.NO_PERMISSION;
logger.error({
logCode: 'audiomanager_error_getting_device',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
},
},
`Error getting microphone - {${error.name}: ${error.message}}`,
);
break;
case 'NotFoundError':
errorPayload.errCode = AudioErrors.MIC_ERROR.DEVICE_NOT_FOUND;
logger.error({
logCode: 'audiomanager_error_device_not_found',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
},
},
`Error getting microphone - {${error.name}: ${error.message}}`,
);
break;
default:
logger.error({
logCode: 'audiomanager_error_unknown',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
},
}, `Error enabling audio - {${name}: ${message}}`);
break;
}
this.isConnecting = false;
throw errorPayload;
});
}
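// Callers therefore receive a normalized payload on failure instead of the
// raw DOMException: { type: 'MEDIA_ERROR', errMessage, errCode }, with
// errCode taken from AudioErrors.MIC_ERROR.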
async joinListenOnly() {
this.audioJoinStartTime = new Date();
this.logAudioJoinTime = false;
this.isListenOnly = true;
this.isEchoTest = false;
logger.info({
logCode: 'audiomanager_join_listenonly',
extraInfo: { logType: 'user_action' },
}, 'User requested to connect to audio conference as listen only');
window.addEventListener('audioPlayFailed', this.handlePlayElementFailed);
return this.onAudioJoining.bind(this)()
.then(() => {
const callOptions = {
isListenOnly: true,
extension: null,
};
return this.joinAudio(callOptions, this.callStateCallback.bind(this));
});
}
onAudioJoining() {
this.isConnecting = true;
this.isMuted = true;
this.error = false;
this.observeVoiceActivity();
// Ensure the local mute state (this.isMuted) is aligned with the initial
// placeholder value before joining audio.
// Currently, the server sets the placeholder mute state to *true*, and this
// is only communicated via observeVoiceActivity's subscription if the initial
// state differs from the placeholder or when the state changes.
// Refer to user_voice_activity DB schema for details.
// tl;dr: without enforcing the initial mute state here, the client won't be
// locally muted if the audio channel starts muted (e.g., dialplan-level
// muteOnStart).
this.setSenderTrackEnabled(!this.isMuted);
return Promise.resolve();
}
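// setSenderTrackEnabled (defined further down in this file) toggles the
// 'enabled' flag on the outbound audio track, which is what local mute means
// at the WebRTC level.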
exitAudio() {
if (!this.isConnected) return Promise.resolve();
this.isHangingUp = true;
return this.bridge.exitAudio();
}
forceExitAudio() {
this.onAudioExit();
return this.bridge && this.bridge.exitAudio();
}
transferCall() {
this.onTransferStart();
return this.bridge.transferCall(this.onAudioJoin.bind(this));
}
onVoiceUserChanges(fields = {}) {
if (fields.muted !== undefined && fields.muted !== this.isMuted) {
let muteState;
this.isMuted = fields.muted;
if (this.isMuted) {
muteState = 'selfMuted';
this.mute();
} else {
muteState = 'selfUnmuted';
this.unmute();
}
}
if (fields.talking !== undefined && fields.talking !== this.isTalking) {
this.isTalking = fields.talking;
}
if (this.isMuted) {
this.isTalking = false;
}
}
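// e.g. a remote (server-side) mute arriving through voice activity:
//   audioManager.onVoiceUserChanges({ muted: true, talking: false });
//   // -> isMuted becomes true, mute() runs, and isTalking is forced to false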
onAudioJoin() {
this.isConnected = true;
this.isConnecting = false;
const STATS = window.meetingClientSettings.public.stats;
const secondsToActivateAudio = (new Date() - this.audioJoinStartTime) / 1000;
if (!this.logAudioJoinTime) {
this.logAudioJoinTime = true;
logger.info(
{
logCode: 'audio_mic_join_time',
extraInfo: {
secondsToActivateAudio,
},
},
`Time needed to connect audio (seconds): ${secondsToActivateAudio}`
);
}
try {
this.inputStream = this.bridge ? this.bridge.inputStream : null;
// Enforce correct output device on audio join
this.changeOutputDevice(this.outputDeviceId, true);
storeAudioOutputDeviceId(this.outputDeviceId);
// Extract the deviceId again from the stream to guarantee consistency
// between the stream's deviceId and the chosen deviceId. That's necessary in
// scenarios where, e.g., there's no default/pre-set deviceId ('') and the
// browser's default device has been altered by the user (browser default !=
// system default).
if (this.inputStream) {
const extractedDeviceId = MediaStreamUtils.extractDeviceIdFromStream(
this.inputStream,
'audio',
);
if (extractedDeviceId && extractedDeviceId !== this.inputDeviceId) {
this.changeInputDevice(extractedDeviceId);
}
}
// Audio joined successfully - add device IDs to session storage so they
// can be re-used on refreshes/other sessions
storeAudioInputDeviceId(this.inputDeviceId);
} catch (error) {
logger.warn({
logCode: 'audiomanager_device_enforce_failed',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
inputDeviceId: this.inputDeviceId,
outputDeviceId: this.outputDeviceId,
},
}, `Failed to enforce input/output devices: ${error.message}`);
}
if (!this.isEchoTest) {
this.notify(this.intl.formatMessage(this.messages.info.JOINED_AUDIO));
logger.info({
logCode: 'audio_joined',
extraInfo: {
secondsToActivateAudio,
inputDeviceId: this.inputDeviceId,
outputDeviceId: this.outputDeviceId,
isListenOnly: this.isListenOnly,
},
}, 'Audio Joined');
if (STATS.enabled) this.monitor();
this.audioEventHandler({
name: 'started',
isListenOnly: this.isListenOnly,
});
}
Session.setItem('audioModalIsOpen', false);
}
onTransferStart() {
this.isEchoTest = false;
this.isConnecting = true;
}
// Must be called before the call is actually torn down, i.e., while
// this.isConnected is still true
notifyAudioExit() {
try {
if (!this.error && (this.isConnected && !this.isEchoTest)) {
this.notify(
this.intl.formatMessage(this.messages.info.LEFT_AUDIO),
false,
'no_audio',
);
}
} catch {}
}
onAudioExit() {
this.notifyAudioExit();
this.isConnected = false;
this.isConnecting = false;
this.isHangingUp = false;
this.autoplayBlocked = false;
this.failedMediaElements = [];
if (this.inputStream) {
this.inputStream.getTracks().forEach((track) => track.stop());
this.inputStream = null;
}
if (!this.isEchoTest) {
this.playHangUpSound();
}
window.removeEventListener('audioPlayFailed', this.handlePlayElementFailed);
}
callStateCallback(response) {
return new Promise((resolve) => {
const { STARTED, ENDED, FAILED, RECONNECTING, AUTOPLAY_BLOCKED } = CALL_STATES;
const { status, error, bridgeError, silenceNotifications, bridge } = response;
if (status === STARTED) {
this.isReconnecting = false;
this.onAudioJoin();
resolve(STARTED);
} else if (status === ENDED) {
this.isReconnecting = false;
this.setBreakoutAudioTransferStatus({
breakoutMeetingId: '',
status: BREAKOUT_AUDIO_TRANSFER_STATES.DISCONNECTED,
});
logger.info({ logCode: 'audio_ended' }, 'Audio ended without issue');
this.onAudioExit();
} else if (status === FAILED) {
this.isReconnecting = false;
this.setBreakoutAudioTransferStatus({
breakoutMeetingId: '',
status: BREAKOUT_AUDIO_TRANSFER_STATES.DISCONNECTED,
});
const errorKey = this.messages.error[error] || this.messages.error.GENERIC_ERROR;
const errorMsg = this.intl.formatMessage(errorKey, { 0: bridgeError });
this.error = !!error;
logger.error(
{
logCode: 'audio_failure',
extraInfo: {
errorCode: error,
cause: bridgeError,
bridge,
inputDeviceId: this.inputDeviceId,
outputDeviceId: this.outputDeviceId,
isListenOnly: this.isListenOnly,
},
},
`Audio error - errorCode=${error}, cause=${bridgeError}`
);
if (silenceNotifications !== true) {
this.notify(errorMsg, true);
this.exitAudio();
this.onAudioExit();
}
} else if (status === RECONNECTING) {
this.isReconnecting = true;
this.setBreakoutAudioTransferStatus({
breakoutMeetingId: '',
status: BREAKOUT_AUDIO_TRANSFER_STATES.DISCONNECTED,
});
logger.info({ logCode: 'audio_reconnecting' }, 'Attempting to reconnect audio');
this.notify(this.intl.formatMessage(this.messages.info.RECONNECTING_AUDIO), true);
this.playHangUpSound();
} else if (status === AUTOPLAY_BLOCKED) {
this.setBreakoutAudioTransferStatus({
breakoutMeetingId: '',
status: BREAKOUT_AUDIO_TRANSFER_STATES.DISCONNECTED,
});
this.isReconnecting = false;
this.autoplayBlocked = true;
this.onAudioJoin();
resolve(AUTOPLAY_BLOCKED);
}
});
}
isUsingAudio() {
return Boolean(this.isConnected || this.isConnecting || this.isHangingUp);
}
changeInputDevice(deviceId) {
if (deviceId === this.inputDeviceId) return this.inputDeviceId;
const currentDeviceId = this.inputDeviceId ?? 'none';
this.inputDeviceId = deviceId;
logger.debug({
logCode: 'audiomanager_input_device_change',
extraInfo: {
deviceId: currentDeviceId,
newDeviceId: deviceId || 'none',
},
}, `Microphone input device changed: from ${currentDeviceId} to ${deviceId || 'none'}`);
return this.inputDeviceId;
}
liveChangeInputDevice(deviceId) {
const currentDeviceId = this.inputDeviceId ?? 'none';
// We force the stream to null so MutedAlert deallocates its cloned stream;
// a new clone will be created once the new device's stream is set
this.inputStream = null;
return this.bridge
.liveChangeInputDevice(deviceId)
.then((stream) => {
this.inputStream = stream;
const extractedDeviceId = MediaStreamUtils.extractDeviceIdFromStream(
this.inputStream,
'audio'
);
if (extractedDeviceId && extractedDeviceId !== this.inputDeviceId) {
this.changeInputDevice(extractedDeviceId);
}
// Live input device change - add device ID to session storage so it
// can be re-used on refreshes/other sessions
storeAudioInputDeviceId(extractedDeviceId);
this.setSenderTrackEnabled(!this.isMuted);
})
.catch((error) => {
logger.error(
{
logCode: 'audiomanager_input_live_device_change_failure',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
deviceId: currentDeviceId,
newDeviceId: deviceId,
},
},
`Input device live change failed - {${error.name}: ${error.message}}`
);
throw error;
});
}
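// Usage sketch (deviceId obtained from navigator.mediaDevices.enumerateDevices()):
//   audioManager.liveChangeInputDevice(deviceId)
//     .catch(() => { /* failure already logged above; surface it to the UI */ });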
async changeOutputDevice(deviceId, isLive) {
const targetDeviceId = deviceId;
const currentDeviceId = this.outputDeviceId ?? getCurrentAudioSinkId();
const MEDIA = window.meetingClientSettings.public.media;
const MEDIA_TAG = MEDIA.mediaTag;
const audioElement = document.querySelector(MEDIA_TAG);
const sinkIdSupported = audioElement && typeof audioElement.setSinkId === 'function';
if (typeof deviceId === 'string' && sinkIdSupported && currentDeviceId !== targetDeviceId) {
try {
if (!isLive) audioElement.srcObject = null;
await audioElement.setSinkId(deviceId);
reloadAudioElement(audioElement);
logger.debug(
{
logCode: 'audiomanager_output_device_change',
extraInfo: {
deviceId: currentDeviceId,
newDeviceId: deviceId,
},
},
`Audio output device changed: from ${currentDeviceId || 'default'} to ${
deviceId || 'default'
}`
);
this.outputDeviceId = deviceId;
// Live output device change - add device ID to session storage so it
// can be re-used on refreshes/other sessions
if (isLive) storeAudioOutputDeviceId(deviceId);
return this.outputDeviceId;
} catch (error) {
logger.error(
{
logCode: 'audiomanager_output_device_change_failure',
extraInfo: {
errorName: error.name,
errorMessage: error.message,
deviceId: currentDeviceId,
newDeviceId: targetDeviceId,
},
},
`Error changing output device - {${error.name}: ${error.message}}`
);
// Rollback/enforce current sinkId (if possible)
if (sinkIdSupported) {
this.outputDeviceId = getCurrentAudioSinkId();
} else {
this.outputDeviceId = currentDeviceId;
}
throw error;
}
}
return this.outputDeviceId;
}
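// Usage sketch: a live sink switch that also persists the choice for future
// sessions (sinkId obtained from enumerateDevices()):
//   await audioManager.changeOutputDevice(sinkId, true);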
get inputStream() {
return this._inputStream();
}
get bridge() {
return this.isListenOnly ? this.listenOnlyBridge : this.fullAudioBridge;
}
set inputStream(stream) {
    // We store the input stream reactively because the MutedAlert component
    // needs to track when it changes and update hark with the new stream
this._inputStream(stream);
}
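
  // Consumer sketch (assumed, mirroring the comment above): a component such
  // as MutedAlert can read the reactive var and feed a clone of the stream to
  // hark for speech detection. The hark options and callback are illustrative.
  //
  //   const stream = audioManager.inputStream;
  //   if (stream) {
  //     const speechEvents = hark(stream.clone(), { threshold: -50 });
  //     speechEvents.on('speaking', () => showMutedBannerIfMuted());
  //   }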
/**
* Sets the current status for breakout audio transfer
* @param {Object} newStatus The status Object to be set for
* audio transfer.
* @param {string} newStatus.breakoutMeetingId The meeting id of the current
* breakout audio transfer.
* @param {string} newStatus.status The status of the current audio
* transfer. Valid values are
* 'connected', 'disconnected' and
* 'returning'.
*/
setBreakoutAudioTransferStatus(newStatus) {
const currentStatus = this._breakoutAudioTransferStatus;
const { breakoutMeetingId, status } = newStatus;
if (typeof breakoutMeetingId === 'string') {
currentStatus.breakoutMeetingId = breakoutMeetingId;
} else {
currentStatus.breakoutMeetingId = null;
}
if (typeof status === 'string') {
currentStatus.status = status;
      if (this.bridge && !this.isListenOnly) {
        // While connected to a breakout room, call state callbacks from the
        // main room's bridge should be ignored until the user returns
        this.bridge.ignoreCallState = status === BREAKOUT_AUDIO_TRANSFER_STATES.CONNECTED;
      }
}
}
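
  // Usage sketch: when transferring the user's audio into a breakout room,
  // marking the transfer as connected makes the bridge ignore call-state
  // callbacks from the main room until the user returns.
  //
  //   audioManager.setBreakoutAudioTransferStatus({
  //     breakoutMeetingId,
  //     status: 'connected', // BREAKOUT_AUDIO_TRANSFER_STATES.CONNECTED
  //   });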
getBreakoutAudioTransferStatus() {
return this._breakoutAudioTransferStatus;
}
set userData(value) {
this._userData = value;
}
get userData() {
return this._userData;
}
playHangUpSound() {
    const { cdn, basename } = window.meetingClientSettings.public.app;

    this.playAlertSound(`${cdn}${basename}/resources/sounds/LeftCall.mp3`);
}
notify(message, error = false, icon = 'unmute') {
const audioIcon = this.isListenOnly ? 'listen' : icon;
notify(message, error ? 'error' : 'info', audioIcon);
}
monitor() {
const peer = this.bridge.getPeerConnection();
monitorAudioConnection(peer);
}
handleAllowAutoplay() {
window.removeEventListener('audioPlayFailed', this.handlePlayElementFailed);
logger.info(
{
logCode: 'audiomanager_autoplay_allowed',
},
'Listen only autoplay allowed by the user'
);
while (this.failedMediaElements.length) {
const mediaElement = this.failedMediaElements.shift();
if (mediaElement) {
playAndRetry(mediaElement).then((played) => {
if (!played) {
logger.error(
{
logCode: 'audiomanager_autoplay_handling_failed',
},
'Listen only autoplay handling failed to play media'
);
} else {
            // logCode is listenonly_* for consistency with the other media tag play logs
logger.info(
{
logCode: 'listenonly_media_play_success',
},
'Listen only media played successfully'
);
}
});
}
}
this.autoplayBlocked = false;
}
handlePlayElementFailed(e) {
const { mediaElement } = e.detail;
e.stopPropagation();
this.failedMediaElements.push(mediaElement);
if (!this.autoplayBlocked) {
logger.info(
{
logCode: 'audiomanager_autoplay_prompt',
},
'Prompting user for action to play listen only media'
);
this.autoplayBlocked = true;
}
}
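
  // Producer-side sketch (assumed from the handler above): a media tag whose
  // play() was rejected by the browser's autoplay policy can hand itself to
  // this manager via a CustomEvent carrying the element in `detail`.
  //
  //   window.dispatchEvent(new CustomEvent('audioPlayFailed', {
  //     detail: { mediaElement },
  //   }));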
setSenderTrackEnabled(shouldEnable) {
// If the bridge is set to listen only mode, nothing to do here. This method
// is solely for muting outbound tracks.
if (this.isListenOnly) return;
// Bridge -> SIP.js bridge, the only full audio capable one right now
const peer = this.bridge.getPeerConnection();
if (!peer) {
return;
}
peer.getSenders().forEach((sender) => {
const { track } = sender;
if (track && track.kind === 'audio') {
track.enabled = shouldEnable;
}
});
}
mute() {
this.setSenderTrackEnabled(false);
}
unmute() {
this.setSenderTrackEnabled(true);
}
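
  // Example: local mute toggling flips `track.enabled` on every outbound
  // audio sender, so no renegotiation is needed.
  //
  //   audioManager.mute();   // outbound tracks disabled; silence is sent
  //   audioManager.unmute(); // outbound tracks re-enabled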
playAlertSound(url) {
if (!url || !this.bridge) {
return Promise.resolve();
}
const audioAlert = new Audio(url);
audioAlert.addEventListener('ended', () => {
      // Release the source with an empty string; assigning null would coerce
      // to the string "null" and trigger a bogus load attempt
      audioAlert.src = '';
});
const { outputDeviceId } = this.bridge;
if (outputDeviceId && typeof audioAlert.setSinkId === 'function') {
return audioAlert.setSinkId(outputDeviceId).then(() => audioAlert.play());
}
return audioAlert.play();
}
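
  // Usage sketch: play a notification on the bridge's current output device.
  // The URL below is illustrative; see playHangUpSound() for a real caller.
  //
  //   audioManager
  //     .playAlertSound(`${cdn}${basename}/resources/sounds/LeftCall.mp3`)
  //     .catch(() => {
  //       // play() may reject under autoplay restrictions
  //     });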
async updateAudioConstraints(constraints) {
await this.bridge.updateAudioConstraints(constraints);
}
/**
* Get the info about candidate-pair that is being used by the current peer.
 * For Firefox, or any other browser that doesn't support the iceTransport
 * property of RTCDtlsTransport, we retrieve the selected local candidate
 * by looking into the stats returned from the getStats() API. For other
 * browsers, getSelectedCandidatePairFromPeer should be used instead,
 * because it includes relatedAddress and relatedPort information about
 * the local candidate.
*
* @param {Object} stats object returned by getStats() api
* @returns An Object of type RTCIceCandidatePairStats containing information
* about the candidate-pair being used by the peer.
*
 * For Firefox, we can use the 'selected' flag to find the candidate pair
 * being used, while in Chrome we can retrieve the selected pair
 * by looking for the corresponding transport of the active peer.
* For more information see:
* https://www.w3.org/TR/webrtc-stats/#dom-rtcicecandidatepairstats
* and
* https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidatePairStats/selected#value
*/
static getSelectedCandidatePairFromStats(stats) {
if (!stats || typeof stats !== 'object') return null;
const transport = Object.values(stats).find((stat) => stat.type === 'transport') || {};
return Object.values(stats).find(
(stat) =>
stat.type === 'candidate-pair' &&
stat.nominated &&
(stat.selected || stat.id === transport.selectedCandidatePairId)
);
}
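
  // Illustrative input/output (values invented): given a stats map keyed by
  // stat id, the nominated pair referenced by the transport entry is returned.
  //
  //   const stats = {
  //     T01: { id: 'T01', type: 'transport', selectedCandidatePairId: 'CP1' },
  //     CP1: { id: 'CP1', type: 'candidate-pair', nominated: true, localCandidateId: 'C1' },
  //   };
  //   AudioManager.getSelectedCandidatePairFromStats(stats); // -> stats.CP1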
/**
* Get the info about candidate-pair that is being used by the current peer.
 * This function's return value (an RTCIceCandidatePair object) differs from
 * getSelectedCandidatePairFromStats (an RTCIceCandidatePairStats object).
 * The information returned here contains the relatedAddress and relatedPort
 * fields (only for candidates that are derived from another candidate; for
 * host candidates, these fields are null). These fields can be helpful for
 * debugging network issues. For all browsers that support the iceTransport
 * field of RTCDtlsTransport, we use this function by default to retrieve
 * information about the current selected pair. For other browsers, we
 * retrieve it from getSelectedCandidatePairFromStats.
 *
 * @returns {Object} An RTCIceCandidatePair representing the selected
 *                   candidate-pair of the active peer.
*
* For more info see:
* https://www.w3.org/TR/webrtc/#dom-rtcicecandidatepair
* and
* https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidatePair
* and
* https://developer.mozilla.org/en-US/docs/Web/API/RTCDtlsTransport
*/
getSelectedCandidatePairFromPeer() {
if (!this.bridge) return null;
const peer = this.bridge.getPeerConnection();
if (!peer) return null;
let selectedPair = null;
const receivers = peer.getReceivers();
    const iceTransport = receivers?.[0]?.transport?.iceTransport;

    if (typeof iceTransport?.getSelectedCandidatePair === 'function') {
      selectedPair = iceTransport.getSelectedCandidatePair();
    }
return selectedPair;
}
/**
* Gets the selected local-candidate information. For browsers that support
* iceTransport property (see getSelectedCandidatePairFromPeer) we get this
* info from peer, otherwise we retrieve this information from getStats() api
*
 * @param {Object} [stats] The stats object returned from the getStats() API
 * @returns {Object} An Object containing information about the
 *                   local-candidate. For browsers that support the
 *                   iceTransport property, the object's type is
 *                   RTCIceCandidate; otherwise, an RTCIceCandidateStats
 *                   object is returned.
*
* For more info see:
* https://www.w3.org/TR/webrtc/#dom-rtcicecandidate
* and
* https://www.w3.org/TR/webrtc-stats/#dom-rtcicecandidatestats
*
*/
getSelectedLocalCandidate(stats) {
let selectedPair = this.getSelectedCandidatePairFromPeer();
if (selectedPair) return selectedPair.local;
if (!stats) return null;
selectedPair = AudioManager.getSelectedCandidatePairFromStats(stats);
if (selectedPair) return stats[selectedPair.localCandidateId];
return null;
}
/**
 * Gets the information about private/public IP addresses from the peer's
 * stats. The information is retrieved from the selected pair of the
 * current RTCIceTransport and returned in a new Object with the format:
* {
* address: String,
* relatedAddress: String,
* port: Number,
* relatedPort: Number,
* candidateType: String,
* selectedLocalCandidate: Object,
* }
*
 * If the user isn't behind NAT, relatedAddress and relatedPort may be null.
*
* @returns An Object containing the information about private/public IP
* addresses and ports.
*
* For more information see:
* https://www.w3.org/TR/webrtc-stats/#dom-rtcicecandidatepairstats
* and
* https://www.w3.org/TR/webrtc-stats/#dom-rtcicecandidatestats
* and
* https://www.w3.org/TR/webrtc/#rtcicecandidatetype-enum
*/
async getInternalExternalIpAddresses(stats) {
let transports = {};
if (stats) {
const selectedLocalCandidate = this.getSelectedLocalCandidate(stats);
if (!selectedLocalCandidate) return transports;
const candidateType = selectedLocalCandidate.candidateType || selectedLocalCandidate.type;
transports = {
isUsingTurn: candidateType === 'relay',
address: selectedLocalCandidate.address,
relatedAddress: selectedLocalCandidate.relatedAddress,
port: selectedLocalCandidate.port,
relatedPort: selectedLocalCandidate.relatedPort,
candidateType,
selectedLocalCandidate,
};
}
return transports;
}
/**
 * Get stats about the active audio peer.
 * We filter the stats based on the FILTER_AUDIO_STATS constant.
 * We also append to the returned object the information about the peer's
 * transport. This transport information is retrieved by
 * getInternalExternalIpAddresses().
 *
 * @returns An Object containing the stats of the active audio peer.
*
* For more information see:
* https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/getStats
* and
* https://developer.mozilla.org/en-US/docs/Web/API/RTCStatsReport
*/
async getStats() {
if (!this.bridge) return null;
const peer = this.bridge.getPeerConnection();
if (!peer) return null;
const peerStats = await peer.getStats();
const audioStats = {};
peerStats.forEach((stat) => {
if (FILTER_AUDIO_STATS.includes(stat.type)) {
audioStats[stat.id] = stat;
}
});
const transportStats = await this.getInternalExternalIpAddresses(audioStats);
return { transportStats, ...audioStats };
}
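
  // Usage sketch (e.g. from a connection-status UI): the resolved object
  // mixes the filtered RTCStats entries (keyed by stat id) with the
  // `transportStats` summary built by getInternalExternalIpAddresses().
  //
  //   const stats = await audioManager.getStats();
  //   if (stats?.transportStats?.isUsingTurn) {
  //     // connection is relayed through a TURN server
  //   }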
}
const audioManager = new AudioManager();
export default audioManager;