bigbluebutton-Github/bigbluebutton-html5/imports/ui/components/audio/local-echo/service.js
prlanzarin 325887e325 feat(audio): rework audio join without listen only
This is a rework of the audio join procedure without the explicit listen
only separation in mind. It's supposed to be used in conjunction with
the transparent listen only feature so that the distinction between
modes is seamless with minimal server-side impact. An abridged list of
changes:
  - Let the user pick no input device when joining microphone while
    allowing them to set an input device on the fly later on
  - Give the user the option to join audio with no input device whenever
    we fail to obtain input devices, with the option to try re-enabling
    them on the fly later on
  - Add the option to open the audio settings modal (echo test et al)
    via the in-call device selection chevron
  - Rework the SFU audio bridge and its services to support
    adding/removing tracks on the fly without renegotiation
  - Rework the SFU audio bridge and its services to support a new peer
    role called "passive-sendrecv". That role is used by duplex peers
    that have no active input source on start, but might have one later
    on.
  - Remove stale PermissionsOverlay component from the audio modal
  - Rework how permission errors are detected using the Permissions API
  - Rework the local echo test so that it uses a separate media tag
    rather than the remote
  - Add new, separate dialplans that mute/hold FreeSWITCH channels on
    hold based on UA strings. This is orchestrated server-side via
    webrtc-sfu and akka-apps. The basic difference here is that channels
    now join in their desired state rather than waiting for client side
    observers to sync the state up. It also mitigates transparent listen
    only performance edge cases on multiple audio channels joining at
    the same time.

The old, decoupled listen only mode is still present in code while we
validate this new approach. To test this, transparentListenOnly
must be enabled and listen only mode must be disabled on audio join so
that the user skips straight through microphone join.
2024-08-15 00:43:28 +00:00

134 lines
3.9 KiB
JavaScript

import LocalPCLoopback from '/imports/ui/services/webrtc-base/local-pc-loopback';
import browserInfo from '/imports/utils/browserInfo';
const LOCAL_MEDIA_TAG = '#local-media';
let audioContext = null;
let sourceContext = null;
let contextDestination = null;
let stubAudioElement = null;
let delayNode = null;
const shouldUseRTCLoopback = () => {
const USE_RTC_LOOPBACK_CHR = window.meetingClientSettings.public.media.localEchoTest.useRtcLoopbackInChromium;
return (browserInfo.isChrome || browserInfo.isEdge) && USE_RTC_LOOPBACK_CHR;
};
const createAudioRTCLoopback = () => new LocalPCLoopback({ audio: true });
const cleanupDelayNode = () => {
if (delayNode) {
delayNode.disconnect();
delayNode = null;
}
if (sourceContext) {
sourceContext.disconnect();
sourceContext = null;
}
if (audioContext) {
audioContext.close();
audioContext = null;
}
if (contextDestination) {
contextDestination.disconnect();
contextDestination = null;
}
if (stubAudioElement) {
stubAudioElement.pause();
stubAudioElement.srcObject = null;
stubAudioElement = null;
}
};
const addDelayNode = (stream) => {
const {
delayTime = 0.5,
maxDelayTime = 2,
} = window.meetingClientSettings.public.media.localEchoTest.delay;
if (stream) {
if (delayNode || audioContext || sourceContext) cleanupDelayNode();
const audioElement = document.querySelector(LOCAL_MEDIA_TAG);
// Workaround: attach the stream to a muted stub audio element to be able to play it in
// Chromium-based browsers. See https://bugs.chromium.org/p/chromium/issues/detail?id=933677
stubAudioElement = new Audio();
stubAudioElement.muted = true;
stubAudioElement.srcObject = stream;
// Create a new AudioContext to be able to add a delay to the stream
audioContext = new AudioContext();
sourceContext = audioContext.createMediaStreamSource(stream);
contextDestination = audioContext.createMediaStreamDestination();
// Create a DelayNode to add a delay to the stream
delayNode = new DelayNode(audioContext, { delayTime, maxDelayTime });
// Connect the stream to the DelayNode and then to the MediaStreamDestinationNode
// to be able to play the stream.
sourceContext.connect(delayNode);
delayNode.connect(contextDestination);
delayNode.delayTime.setValueAtTime(delayTime, audioContext.currentTime);
// Play the stream with the delay in the default audio element (local-media)
audioElement.srcObject = contextDestination.stream;
}
};
const deattachEchoStream = () => {
const {
enabled: DELAY_ENABLED = true,
} = window.meetingClientSettings.public.media.localEchoTest.delay;
const audioElement = document.querySelector(LOCAL_MEDIA_TAG);
if (DELAY_ENABLED) {
audioElement.muted = false;
cleanupDelayNode();
}
audioElement.pause();
audioElement.srcObject = null;
};
const playEchoStream = async (stream, loopbackAgent = null) => {
const {
enabled: DELAY_ENABLED = true,
} = window.meetingClientSettings.public.media.localEchoTest.delay;
if (stream) {
deattachEchoStream();
let streamToPlay = stream;
if (loopbackAgent) {
// Chromium based browsers need audio to go through PCs for echo cancellation
// to work. See https://bugs.chromium.org/p/chromium/issues/detail?id=687574
try {
await loopbackAgent.start(stream);
streamToPlay = loopbackAgent.loopbackStream;
} catch (error) {
loopbackAgent.stop();
}
}
if (DELAY_ENABLED) {
addDelayNode(streamToPlay);
} else {
// No delay: play the stream in the default audio element (local-media),
// no strings attached.
const audioElement = document.querySelector(LOCAL_MEDIA_TAG);
audioElement.srcObject = streamToPlay;
audioElement.muted = false;
audioElement.play();
}
}
};
export default {
shouldUseRTCLoopback,
createAudioRTCLoopback,
deattachEchoStream,
playEchoStream,
};