// bigbluebutton-Github/bigbluebutton-html5/imports/ui/components/audio/help/component.jsx

import React, { Component } from 'react';
import { injectIntl, defineMessages } from 'react-intl';
import PropTypes from 'prop-types';
import Styled from './styles';
const propTypes = {
  intl: PropTypes.shape({
    formatMessage: PropTypes.func.isRequired,
  }).isRequired,
  isListenOnly: PropTypes.bool.isRequired,
  isConnected: PropTypes.bool.isRequired,
  audioErr: PropTypes.shape({
    code: PropTypes.number,
    message: PropTypes.string,
    MIC_ERROR: PropTypes.shape({
      NO_SSL: PropTypes.number,
      MAC_OS_BLOCK: PropTypes.number,
      NO_PERMISSION: PropTypes.number,
    }),
  }).isRequired,
  handleBack: PropTypes.func.isRequired,
  handleRetryMic: PropTypes.func.isRequired,
  handleJoinListenOnly: PropTypes.func.isRequired,
  troubleshootingLink: PropTypes.string,
};

const defaultProps = {
  troubleshootingLink: '',
};
const intlMessages = defineMessages({
  helpSubtitleMic: {
    id: 'app.audioModal.helpSubtitleMic',
    description: 'Text description for the audio help subtitle (microphones)',
  },
  helpSubtitlePermission: {
    id: 'app.audioModal.helpSubtitlePermission',
    description: 'Text description for the audio help subtitle (permission)',
  },
  helpSubtitleGeneric: {
    id: 'app.audioModal.helpSubtitleGeneric',
    description: 'Text description for the audio help subtitle (generic)',
  },
  helpPermissionStep1: {
    id: 'app.audioModal.helpPermissionStep1',
    description: 'Text description for the audio permission help step 1',
  },
  helpPermissionStep2: {
    id: 'app.audioModal.helpPermissionStep2',
    description: 'Text description for the audio permission help step 2',
  },
  helpPermissionStep3: {
    id: 'app.audioModal.helpPermissionStep3',
    description: 'Text description for the audio permission help step 3',
  },
  backLabel: {
    id: 'app.audio.backLabel',
    description: 'audio settings back button label',
  },
  retryMicLabel: {
    id: 'app.audio.audioSettings.retryMicLabel',
    description: 'audio settings retry button label',
  },
  listenOnlyLabel: {
    id: 'app.audioModal.listenOnlyLabel',
    description: 'audio settings listen only button label',
  },
  noSSL: {
    id: 'app.audioModal.help.noSSL',
    description: 'Text description for domain not using https',
  },
  macNotAllowed: {
    id: 'app.audioModal.help.macNotAllowed',
    description: 'Text description for mac needed to enable OS setting',
  },
  helpTroubleshoot: {
    id: 'app.audioModal.help.troubleshoot',
    description: 'Text description for help troubleshoot',
  },
  unknownError: {
    id: 'app.audioModal.help.unknownError',
    description: 'Text description for unknown error',
  },
  errorCode: {
    id: 'app.audioModal.help.errorCode',
    description: 'Text description for error code',
  },
});
class Help extends Component {
  // Returns the help subtitle that matches the current state: permission
  // error, microphone join, or generic (listen only).
  getSubtitle() {
    const { audioErr, intl, isListenOnly } = this.props;
    const { MIC_ERROR } = audioErr;

    if (audioErr.code === MIC_ERROR.NO_PERMISSION) {
      return intl.formatMessage(intlMessages.helpSubtitlePermission);
    }

    return !isListenOnly
      ? intl.formatMessage(intlMessages.helpSubtitleMic)
      : intl.formatMessage(intlMessages.helpSubtitleGeneric);
  }

  renderNoSSL() {
    const { intl } = this.props;

    return (
      <Styled.Text>
        {intl.formatMessage(intlMessages.noSSL)}
      </Styled.Text>
    );
  }

  renderMacNotAllowed() {
    const { intl } = this.props;

    return (
      <Styled.Text>
        {intl.formatMessage(intlMessages.macNotAllowed)}
      </Styled.Text>
    );
  }

  renderPermissionHelp() {
    const { intl } = this.props;

    return (
      <>
        <Styled.Text>
          {this.getSubtitle()}
        </Styled.Text>
        <Styled.PermissionHelpSteps>
          <li>{intl.formatMessage(intlMessages.helpPermissionStep1)}</li>
          <li>{intl.formatMessage(intlMessages.helpPermissionStep2)}</li>
          <li>{intl.formatMessage(intlMessages.helpPermissionStep3)}</li>
        </Styled.PermissionHelpSteps>
      </>
    );
  }
  renderGenericErrorHelp() {
    const { intl, audioErr } = this.props;
    const { code, message } = audioErr;
    return (
      <>
        <Styled.Text>
          {this.getSubtitle()}
        </Styled.Text>
        <Styled.Text>
          {intl.formatMessage(intlMessages.unknownError)}
        </Styled.Text>
        <Styled.UnknownError>
          {intl.formatMessage(intlMessages.errorCode, { 0: code, 1: message || 'UnknownError' })}
        </Styled.UnknownError>
      </>
    );
  }

  // Picks which help content to render based on the microphone error code.
  renderHelpMessage() {
    const { audioErr } = this.props;
    const { MIC_ERROR } = audioErr;

    switch (audioErr.code) {
      case MIC_ERROR.NO_SSL:
        return this.renderNoSSL();
      case MIC_ERROR.MAC_OS_BLOCK:
        return this.renderMacNotAllowed();
      case MIC_ERROR.NO_PERMISSION:
        return this.renderPermissionHelp();
      default:
        return this.renderGenericErrorHelp();
    }
  }

  render() {
    const {
      intl,
      isConnected,
      handleBack,
      handleRetryMic,
      handleJoinListenOnly,
      troubleshootingLink,
    } = this.props;
    return (
      <Styled.Help>
        {this.renderHelpMessage()}
        { troubleshootingLink && (
          <Styled.Text>
            <Styled.TroubleshootLink
              href={troubleshootingLink}
              target="_blank"
              rel="noopener noreferrer"
            >
              {intl.formatMessage(intlMessages.helpTroubleshoot)}
            </Styled.TroubleshootLink>
          </Styled.Text>
        )}
        <Styled.EnterAudio>
          {!isConnected ? (
            <Styled.HelpActionButton
              label={intl.formatMessage(intlMessages.listenOnlyLabel)}
              data-test="helpListenOnlyBtn"
              icon="listen"
              size="md"
              color="secondary"
              onClick={handleJoinListenOnly}
            />
          ) : (
            <Styled.HelpActionButton
              label={intl.formatMessage(intlMessages.backLabel)}
              data-test="helpBackBtn"
              color="secondary"
              size="md"
              onClick={handleBack}
            />
          )}
          <Styled.HelpActionButton
            label={intl.formatMessage(intlMessages.retryMicLabel)}
            data-test="helpRetryMicBtn"
            icon="unmute"
            size="md"
            color="primary"
            onClick={handleRetryMic}
          />
        </Styled.EnterAudio>
      </Styled.Help>
    );
  }
}
Help.propTypes = propTypes;
Help.defaultProps = defaultProps;
export default injectIntl(Help);
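/*
 * Example usage (illustrative sketch only, not taken from this repository).
 * The handler implementations, error-code values, and troubleshooting URL
 * below are assumptions; `intl` is injected by injectIntl, so callers do not
 * pass it themselves.
 *
 * <Help
 *   isListenOnly={false}
 *   isConnected={false}
 *   audioErr={{
 *     code: 3,
 *     message: 'NotAllowedError',
 *     MIC_ERROR: { NO_SSL: 1, MAC_OS_BLOCK: 2, NO_PERMISSION: 3 },
 *   }}
 *   handleBack={() => closeAudioHelp()}            // hypothetical handler
 *   handleRetryMic={() => retryMicrophoneJoin()}   // hypothetical handler
 *   handleJoinListenOnly={() => joinListenOnly()}  // hypothetical handler
 *   troubleshootingLink="https://example.com/audio-troubleshooting"
 * />
 */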