bigbluebutton-Github/bigbluebutton-html5/imports/ui/components/audio/audio-modal/component.jsx

feat(audio): rework audio join without listen only

This is a rework of the audio join procedure without the explicit listen only separation in mind. It is meant to be used in conjunction with the transparent listen only feature so that the distinction between modes is seamless, with minimal server-side impact. An abridged list of changes:
- Let the user pick no input device when joining the microphone, while allowing them to set an input device on the fly later on
- Give the user the option to join audio with no input device whenever we fail to obtain input devices, with the option to try re-enabling them on the fly later on
- Add the option to open the audio settings modal (echo test et al.) via the in-call device selection chevron
- Rework the SFU audio bridge and its services to support adding/removing tracks on the fly without renegotiation
- Rework the SFU audio bridge and its services to support a new peer role called "passive-sendrecv". That role is used by duplex peers that have no active input source on start, but might have one later on
- Remove the stale PermissionsOverlay component from the audio modal
- Rework how permission errors are detected using the Permissions API (a minimal sketch of this kind of probe follows below)
- Rework the local echo test so that it uses a separate media tag rather than the remote one
- Add new, separate dialplans that mute/hold FreeSWITCH channels on hold based on UA strings. This is orchestrated server-side via webrtc-sfu and akka-apps. The basic difference is that channels now join in their desired state rather than waiting for client-side observers to sync the state up. It also mitigates transparent listen only performance edge cases when multiple audio channels join at the same time

The old, decoupled listen only mode is still present in the code while we validate this new approach. To test this, transparentListenOnly must be enabled and listen only mode must be disabled on audio join so that the user skips straight through to microphone join.
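The Permissions API detection mentioned above ultimately backs the hasMicrophonePermission prop used further down in this component. As a rough sketch only (illustrative names, not the project's actual helper), such a probe could look like this:

const probeMicrophonePermission = async () => {
  try {
    // 'microphone' is not recognized as a permission name by every browser,
    // so failures are treated as "undetermined" and left for getUserMedia.
    const status = await navigator.permissions.query({ name: 'microphone' });
    if (status.state === 'granted') return true;
    if (status.state === 'denied') return false;
    return null; // 'prompt': undetermined until getUserMedia actually runs
  } catch (error) {
    return null;
  }
};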
import React, {
useCallback,
useEffect,
useState,
} from 'react';
import PropTypes from 'prop-types';
import {
defineMessages, injectIntl, FormattedMessage,
} from 'react-intl';
import { useMutation } from '@apollo/client';
import Styled from './styles';
import AudioSettings from '../audio-settings/component';
import EchoTest from '../echo-test/component';
import Help from '../help/component';
import AudioDial from '../audio-dial/component';
import AudioAutoplayPrompt from '../autoplay/component';
import { getSettingsSingletonInstance } from '/imports/ui/services/settings';
import usePreviousValue from '/imports/ui/hooks/usePreviousValue';
import { SET_AWAY } from '/imports/ui/components/user-list/user-list-content/user-participants/user-list-participants/user-actions/mutations';
import VideoService from '/imports/ui/components/video-provider/service';
import AudioCaptionsSelectContainer from '../audio-graphql/audio-captions/captions/component';
import useToggleVoice from '/imports/ui/components/audio/audio-graphql/hooks/useToggleVoice';
import {
muteAway,
} from '/imports/ui/components/audio/audio-graphql/audio-controls/input-stream-live-selector/service';
import Session from '/imports/ui/services/storage/in-memory';
import logger from '/imports/startup/client/logger';

const propTypes = {
intl: PropTypes.shape({
formatMessage: PropTypes.func.isRequired,
}).isRequired,
closeModal: PropTypes.func.isRequired,
joinMicrophone: PropTypes.func.isRequired,
joinListenOnly: PropTypes.func.isRequired,
joinEchoTest: PropTypes.func.isRequired,
exitAudio: PropTypes.func.isRequired,
leaveEchoTest: PropTypes.func.isRequired,
changeInputDevice: PropTypes.func.isRequired,
changeOutputDevice: PropTypes.func.isRequired,
isEchoTest: PropTypes.bool.isRequired,
isConnecting: PropTypes.bool.isRequired,
isConnected: PropTypes.bool.isRequired,
isUsingAudio: PropTypes.bool.isRequired,
isListenOnly: PropTypes.bool.isRequired,
isMuted: PropTypes.bool.isRequired,
toggleMuteMicrophoneSystem: PropTypes.func.isRequired,
inputDeviceId: PropTypes.string,
outputDeviceId: PropTypes.string,
formattedDialNum: PropTypes.string.isRequired,
listenOnlyMode: PropTypes.bool.isRequired,
joinFullAudioImmediately: PropTypes.bool,
forceListenOnlyAttendee: PropTypes.bool.isRequired,
audioLocked: PropTypes.bool.isRequired,
resolve: PropTypes.func,
isMobileNative: PropTypes.bool.isRequired,
isIE: PropTypes.bool.isRequired,
formattedTelVoice: PropTypes.string.isRequired,
autoplayBlocked: PropTypes.bool.isRequired,
handleAllowAutoplay: PropTypes.func.isRequired,
changeInputStream: PropTypes.func.isRequired,
localEchoEnabled: PropTypes.bool.isRequired,
notify: PropTypes.func.isRequired,
isRTL: PropTypes.bool.isRequired,
priority: PropTypes.string.isRequired,
isOpen: PropTypes.bool.isRequired,
setIsOpen: PropTypes.func.isRequired,
AudioError: PropTypes.shape({
MIC_ERROR: PropTypes.shape({
UNKNOWN: PropTypes.number,
NO_SSL: PropTypes.number,
MAC_OS_BLOCK: PropTypes.number,
NO_PERMISSION: PropTypes.number,
DEVICE_NOT_FOUND: PropTypes.number,
}),
}).isRequired,
getTroubleshootingLink: PropTypes.func.isRequired,
away: PropTypes.bool,
doGUM: PropTypes.func.isRequired,
hasMicrophonePermission: PropTypes.func.isRequired,
permissionStatus: PropTypes.string,
liveChangeInputDevice: PropTypes.func.isRequired,
content: PropTypes.string,
unmuteOnExit: PropTypes.bool,
supportsTransparentListenOnly: PropTypes.bool.isRequired,
getAudioConstraints: PropTypes.func.isRequired,
isTranscriptionEnabled: PropTypes.bool.isRequired,
};

const intlMessages = defineMessages({
microphoneLabel: {
id: 'app.audioModal.microphoneLabel',
description: 'Join mic audio button label',
},
listenOnlyLabel: {
id: 'app.audioModal.listenOnlyLabel',
description: 'Join listen only audio button label',
},
listenOnlyDesc: {
id: 'app.audioModal.listenOnlyDesc',
description: 'Join listen only audio button description',
},
microphoneDesc: {
id: 'app.audioModal.microphoneDesc',
description: 'Join mic audio button description',
},
closeLabel: {
id: 'app.audioModal.closeLabel',
description: 'close audio modal button label',
},
audioChoiceLabel: {
id: 'app.audioModal.audioChoiceLabel',
description: 'Join audio modal title',
},
iOSError: {
id: 'app.audioModal.iOSBrowser',
description: 'Audio/Video Not supported warning',
},
iOSErrorDescription: {
id: 'app.audioModal.iOSErrorDescription',
description: 'Audio/Video not supported description',
},
iOSErrorRecommendation: {
id: 'app.audioModal.iOSErrorRecommendation',
description: 'Audio/Video recommended action',
},
echoTestTitle: {
id: 'app.audioModal.echoTestTitle',
description: 'Title for the echo test',
},
settingsTitle: {
id: 'app.audio.audioSettings.titleLabel',
description: 'Title for the audio modal',
},
helpTitle: {
id: 'app.audioModal.helpTitle',
description: 'Title for the audio help',
},
audioDialTitle: {
id: 'app.audioModal.audioDialTitle',
description: 'Title for the audio dial',
},
connecting: {
id: 'app.audioModal.connecting',
description: 'Message for audio connecting',
},
ariaModalTitle: {
id: 'app.audioModal.ariaTitle',
description: 'aria label for modal title',
},
autoplayPromptTitle: {
id: 'app.audioModal.autoplayBlockedDesc',
description: 'Message for autoplay audio block',
},
findingDevicesTitle: {
id: 'app.audio.audioSettings.findingDevicesTitle',
description: 'Message for finding audio devices',
},
});

const AudioModal = ({
forceListenOnlyAttendee,
joinFullAudioImmediately = false,
listenOnlyMode,
audioLocked,
isUsingAudio,
isListenOnly,
isMuted,
toggleMuteMicrophoneSystem,
autoplayBlocked,
closeModal,
isEchoTest,
exitAudio,
resolve = null,
leaveEchoTest,
AudioError,
joinEchoTest,
isConnecting,
localEchoEnabled,
joinListenOnly,
changeInputStream,
joinMicrophone,
intl,
isMobileNative,
formattedDialNum,
isRTL,
isConnected,
inputDeviceId = null,
outputDeviceId = null,
changeInputDevice,
changeOutputDevice,
notify,
formattedTelVoice,
handleAllowAutoplay,
isIE,
isOpen,
priority,
setIsOpen,
getTroubleshootingLink,
away = false,
doGUM,
getAudioConstraints,
hasMicrophonePermission,
liveChangeInputDevice,
content: initialContent,
supportsTransparentListenOnly,
unmuteOnExit = false,
permissionStatus = null,
isTranscriptionEnabled,
}) => {
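// Modal sub-view state: which content is rendered ('settings', 'echoTest', 'help',
// 'audioDial', or null for the default options), error bookkeeping, autoplay
// handling, and whether input devices are still being looked up.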
const [content, setContent] = useState(initialContent);
const [hasError, setHasError] = useState(false);
const [disableActions, setDisableActions] = useState(false);
const [errorInfo, setErrorInfo] = useState(null);
const [autoplayChecked, setAutoplayChecked] = useState(false);
const [findingDevices, setFindingDevices] = useState(false);
const [setAway] = useMutation(SET_AWAY);
const voiceToggle = useToggleVoice();
const prevAutoplayBlocked = usePreviousValue(autoplayBlocked);
useEffect(() => {
if (prevAutoplayBlocked && !autoplayBlocked) {
setAutoplayChecked(true);
}
}, [autoplayBlocked]);
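// Route audio join failures into modal state: media errors open the help view
// with the error details; connection (and unknown) errors are only recorded.
// Actions are re-enabled in both cases.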
const handleJoinAudioError = (err) => {
const { type, errCode, errMessage } = err;
switch (type) {
case 'MEDIA_ERROR':
setContent('help');
setErrorInfo({
errCode,
errMessage,
});
setDisableActions(false);
break;
case 'CONNECTION_ERROR':
default:
setErrorInfo({
errCode,
errMessage: type,
});
setDisableActions(false);
break;
}
};
const handleGoToLocalEcho = () => {
// Simplified echo test: this will return the AudioSettings with:
// - withEcho: true
// Echo test will be local and done in the AudioSettings view instead of the
// old E2E -> yes/no -> join view
setContent('settings');
};
const handleGoToEchoTest = () => {
const { MIC_ERROR } = AudioError;
const noSSL = !window.location.protocol.includes('https');
if (noSSL) {
setContent('help');
setErrorInfo({
errCode: MIC_ERROR.NO_SSL,
errMessage: 'NoSSL',
});
return null;
}
if (disableActions && isConnecting) return null;
if (localEchoEnabled) return handleGoToLocalEcho();
setHasError(false);
setDisableActions(true);
return joinEchoTest().then(() => {
setContent('echoTest');
setDisableActions(true);
}).catch((err) => {
handleJoinAudioError(err);
});
};
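// Normalize getUserMedia/permission failures: log them, switch to the help view
// and map NotAllowedError to the NO_PERMISSION microphone error code.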
const handleGUMFailure = (error) => {
const { MIC_ERROR } = AudioError;
logger.error({
logCode: 'audio_gum_failed',
extraInfo: {
errorMessage: error.message,
errorName: error.name,
},
}, `Audio gUM failed: ${error.name}`);
setContent('help');
setDisableActions(false);
setHasError(true);
setErrorInfo({
errCode: error?.name === 'NotAllowedError'
? MIC_ERROR.NO_PERMISSION
: 0,
errMessage: error?.name || 'NotAllowedError',
});
};
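// Permissions API based microphone check. A null result means undetermined and
// is deliberately not treated as a failure here.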
const checkMicrophonePermission = (options) => {
setFindingDevices(true);
return hasMicrophonePermission(options)
.then((hasPermission) => {
// null means undetermined, so we don't want to show the error modal
// and let downstream components figure it out
if (hasPermission === true || hasPermission === null) {
return hasPermission;
}
handleGUMFailure(new DOMException(
'Permissions API says denied',
'NotAllowedError',
));
return false;
})
.catch((error) => {
handleGUMFailure(error);
return null;
})
.finally(() => {
setFindingDevices(false);
});
};
const handleGoToAudioOptions = () => {
setContent(null);
setDisableActions(false);
};
const handleGoToAudioSettings = () => {
leaveEchoTest().then(() => {
setContent('settings');
});
};
const handleRetryGoToEchoTest = () => {
setHasError(false);
setContent(null);
setErrorInfo(null);
return handleGoToEchoTest();
};
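// When the user is flagged as away, joining audio clears that state: restore the
// microphone via muteAway, clear the away flag and re-enable webcam tracks.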
const disableAwayMode = () => {
if (!away) return;
muteAway(false, true, voiceToggle);
setAway({
variables: {
away: false,
},
});
VideoService.setTrackEnabled(true);
};
const handleJoinListenOnly = () => {
if (disableActions && isConnecting) return null;
setDisableActions(true);
setHasError(false);
setErrorInfo(null);
return joinListenOnly().then(() => {
setDisableActions(false);
disableAwayMode();
}).catch((err) => {
handleJoinAudioError(err);
});
};
const handleJoinMicrophone = () => {
if (disableActions && isConnecting) return;
setHasError(false);
setDisableActions(true);
setErrorInfo(null);
joinMicrophone().then(() => {
setDisableActions(false);
}).catch((err) => {
handleJoinAudioError(err);
});
};
const handleAudioSettingsConfirmation = useCallback((inputStream) => {
// Reset the modal to a connecting state - this kind of sucks?
// prlanzarin Apr 04 2022
setContent(null);
if (inputStream) changeInputStream(inputStream);
if (!isConnected) {
handleJoinMicrophone();
disableAwayMode();
} else {
closeModal();
}
}, [changeInputStream, isConnected]);
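// The audio options screen is skipped when there is nothing to choose: while
// connecting, while a forced listen only attendee waits for the autoplay check,
// or when listen only mode is disabled altogether.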
const skipAudioOptions = useCallback(
// eslint-disable-next-line max-len
() => !hasError && (isConnecting || (forceListenOnlyAttendee && !autoplayChecked) || !listenOnlyMode),
[hasError, isConnecting, forceListenOnlyAttendee, autoplayChecked, listenOnlyMode],
);
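// Default view: microphone and listen only join buttons, plus an optional
// dial-in shortcut and, when joining full audio immediately, the captions selector.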
const renderAudioOptions = () => {
const hideMicrophone = forceListenOnlyAttendee || audioLocked;
const arrow = isRTL ? '←' : '→';
const dialAudioLabel = `${intl.formatMessage(intlMessages.audioDialTitle)} ${arrow}`;
return (
<div>
<Styled.AudioOptions data-test="audioModalOptions">
{!hideMicrophone && !isMobileNative && (
<>
<Styled.AudioModalButton
label={intl.formatMessage(intlMessages.microphoneLabel)}
data-test="microphoneBtn"
aria-describedby="mic-description"
icon="unmute"
circle
size="jumbo"
disabled={audioLocked}
onClick={
joinFullAudioImmediately
? handleJoinMicrophone
: handleGoToEchoTest
}
/>
<span className="sr-only" id="mic-description">
{intl.formatMessage(intlMessages.microphoneDesc)}
</span>
</>
)}
{listenOnlyMode && (
<>
<Styled.AudioModalButton
label={intl.formatMessage(intlMessages.listenOnlyLabel)}
data-test="listenOnlyBtn"
aria-describedby="listenOnly-description"
icon="listen"
circle
size="jumbo"
onClick={handleJoinListenOnly}
/>
<span className="sr-only" id="listenOnly-description">
{intl.formatMessage(intlMessages.listenOnlyDesc)}
</span>
</>
)}
</Styled.AudioOptions>
{formattedDialNum ? (
<Styled.AudioDial
label={dialAudioLabel}
size="md"
color="secondary"
onClick={() => {
setContent('audioDial');
}}
/>
) : null}
{joinFullAudioImmediately && <AudioCaptionsSelectContainer />}
</div>
);
};
const renderEchoTest = () => (
<EchoTest
handleNo={handleGoToAudioSettings}
handleYes={handleJoinMicrophone}
/>
);
const handleBack = useCallback(() => {
// If audio is active, if audio options are flagged to be skipped (see skipAudioOptions),
// or if listen only mode is deactivated (which means there are no actual options
// in the base modal), clicking back on any of the sub-modals should close the base modal
if (isConnecting
|| isConnected
|| skipAudioOptions()) {
closeModal();
} else {
handleGoToAudioOptions();
}
}, [isConnecting, isConnected, skipAudioOptions, listenOnlyMode]);
const renderAudioSettings = () => {
const { animations } = getSettingsSingletonInstance().application;
const confirmationCallback = !localEchoEnabled
? handleRetryGoToEchoTest
: handleAudioSettingsConfirmation;
return (
<AudioSettings
animations={animations}
handleBack={handleBack}
handleConfirmation={confirmationCallback}
handleGUMFailure={handleGUMFailure}
joinEchoTest={joinEchoTest}
changeInputDevice={changeInputDevice}
liveChangeInputDevice={liveChangeInputDevice}
changeOutputDevice={changeOutputDevice}
isConnecting={isConnecting}
isConnected={isConnected}
isMuted={isMuted}
toggleMuteMicrophoneSystem={toggleMuteMicrophoneSystem}
inputDeviceId={inputDeviceId}
outputDeviceId={outputDeviceId}
withEcho={localEchoEnabled}
produceStreams
notify={notify}
        unmuteOnExit={unmuteOnExit}
        doGUM={doGUM}
        getAudioConstraints={getAudioConstraints}
        checkMicrophonePermission={checkMicrophonePermission}
        supportsTransparentListenOnly={supportsTransparentListenOnly}
        toggleVoice={voiceToggle}
        permissionStatus={permissionStatus}
        isTranscriptionEnabled={isTranscriptionEnabled}
        skipAudioOptions={skipAudioOptions}
      />
    );
  };

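  // Help view: wires the collected audio error info to the Help screen,
  // with a microphone retry and a listen only fallback.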
  const renderHelp = () => {
    const audioErr = {
      ...AudioError,
      code: errorInfo?.errCode,
      message: errorInfo?.errMessage,
    };

    const _joinListenOnly = () => {
      // Erase the content state so that the modal transitions to the connecting
      // state if the user chooses listen only
      setContent(null);
      handleJoinListenOnly();
    };

    return (
      <Help
        isConnected={isConnected}
        handleBack={handleBack}
        handleJoinListenOnly={_joinListenOnly}
        handleRetryMic={handleGoToAudioSettings}
        audioErr={audioErr}
        isListenOnly={isListenOnly}
        troubleshootingLink={getTroubleshootingLink(errorInfo?.errCode)}
      />
    );
  };

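  // Dial-in view: shows the formatted dial-in number and voice conference extension.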
  const renderAudioDial = () => (
    <AudioDial
      formattedDialNum={formattedDialNum}
      telVoice={formattedTelVoice}
      handleBack={handleGoToAudioOptions}
    />
  );

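  // Prompt shown when the browser blocks audio autoplay.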
  const renderAutoplayOverlay = () => (
    <AudioAutoplayPrompt
      handleAllowAutoplay={handleAllowAutoplay}
    />
  );

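  // Maps each content state to its modal title and render function.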
  const contents = {
    echoTest: {
      title: intlMessages.echoTestTitle,
      component: renderEchoTest,
    },
    settings: {
      title: intlMessages.settingsTitle,
      component: renderAudioSettings,
    },
    help: {
      title: intlMessages.helpTitle,
      component: renderHelp,
    },
    audioDial: {
      title: intlMessages.audioDialTitle,
      component: renderAudioDial,
    },
    autoplayBlocked: {
      title: intlMessages.autoplayPromptTitle,
      component: renderAutoplayOverlay,
    },
  };

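  // Resolves which view to render: a connecting indicator while devices are
  // being enumerated or while the audio options are skipped, the currently
  // selected content view, or the default audio options screen.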
  const renderContent = () => {
    const { animations } = getSettingsSingletonInstance().application;

    if (content == null) {
      if (findingDevices) {
        return (
          <Styled.Connecting role="alert">
            <span data-test="findingDevicesLabel">
              {intl.formatMessage(intlMessages.findingDevicesTitle)}
            </span>
            <Styled.ConnectingAnimation animations={animations} />
          </Styled.Connecting>
        );
      }

      if (skipAudioOptions()) {
        return (
          <Styled.Connecting role="alert">
            <span data-test={!isEchoTest ? 'establishingAudioLabel' : 'connectingToEchoTest'}>
              {intl.formatMessage(intlMessages.connecting)}
            </span>
            <Styled.ConnectingAnimation animations={animations} />
          </Styled.Connecting>
        );
      }
    }

    return content ? contents[content].component() : renderAudioOptions();
  };

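  // Entry-point effect: when the user is not in audio yet, join listen only
  // right away for locked/forced attendees; otherwise, when listen only mode
  // is disabled, either join the microphone immediately (after a permission
  // check) or go to the audio settings/echo test screen.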
  useEffect(() => {
    if (!isUsingAudio) {
      if (forceListenOnlyAttendee || audioLocked) {
        handleJoinListenOnly();
      } else if (!listenOnlyMode) {
        if (joinFullAudioImmediately) {
          checkMicrophonePermission({ doGUM: true, permissionStatus })
            .then((hasPermission) => {
              // No permission - let the Help screen be shown as it's triggered
              // by the checkMicrophonePermission function
              if (hasPermission === false) return;

              // Permission is granted or undetermined, so we can proceed
              handleJoinMicrophone();
            });
        } else {
          // No need to check for permission here since the AudioSettings
          // component will handle it
          handleGoToEchoTest();
        }
      }
    }
  }, [
    audioLocked,
    isUsingAudio,
    forceListenOnlyAttendee,
    joinFullAudioImmediately,
    listenOnlyMode,
  ]);

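  // Switch to the autoplay prompt while playback is blocked and close the
  // modal once the block is lifted.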
  useEffect(() => {
    if (autoplayBlocked) {
      setContent('autoplayBlocked');
    } else if (prevAutoplayBlocked) {
      closeModal();
    }
  }, [autoplayBlocked]);

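  // Unmount cleanup: leave the echo test if it is still running, resolve the
  // pending callback, and mark the modal as closed.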
  useEffect(() => () => {
    if (isEchoTest) {
      exitAudio();
    }
    if (resolve) resolve();
    Session.setItem('audioModalIsOpen', false);
  }, []);

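  // Only show a title when a specific content view is active or the default
  // audio options are displayed (i.e. not while connecting or finding devices).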
  let title = content
    ? intl.formatMessage(contents[content].title)
    : intl.formatMessage(intlMessages.audioChoiceLabel);
  title = (!skipAudioOptions() && !findingDevices) || content
    ? title
    : null;

  return (
    <Styled.Background isBlurred={Session.getItem('audioModalIsOpen')}>
      <Styled.AudioModal
        modalName="AUDIO"
        onRequestClose={closeModal}
        data-test="audioModal"
        contentLabel={intl.formatMessage(intlMessages.ariaModalTitle)}
        title={title}
        {...{
          setIsOpen,
          isOpen,
          priority,
        }}
      >
        {isIE ? (
          <Styled.BrowserWarning>
            <FormattedMessage
              id="app.audioModal.unsupportedBrowserLabel"
              description="Warning when someone joins with a browser that isn't supported"
              values={{
                0: <a href="https://www.google.com/chrome/">Chrome</a>,
                1: <a href="https://getfirefox.com">Firefox</a>,
              }}
            />
          </Styled.BrowserWarning>
        ) : null}
        <Styled.Content>
          {renderContent()}
        </Styled.Content>
      </Styled.AudioModal>
    </Styled.Background>
  );
};
AudioModal.propTypes = propTypes;

export default injectIntl(AudioModal);