// bigbluebutton-html5/imports/ui/services/bbb-webrtc-sfu/audio-broker.js

import logger from '/imports/startup/client/logger';
import BaseBroker from '/imports/ui/services/bbb-webrtc-sfu/sfu-base-broker';
import WebRtcPeer from '/imports/ui/services/webrtc-base/peer';
// Signalling message identifiers exchanged with the SFU over the websocket.
const ON_ICE_CANDIDATE_MSG = 'iceCandidate';
const SUBSCRIBER_ANSWER = 'subscriberAnswer';
const DTMF = 'dtmf';
// SFU component this broker talks to; also used as the `type` field in messages.
const SFU_COMPONENT_NAME = 'audio';
class AudioBroker extends BaseBroker {
  /**
   * Broker that manages a WebRTC audio session (microphone and/or listen-only)
   * negotiated with bbb-webrtc-sfu over a websocket signalling channel.
   *
   * @param {string} wsUrl - websocket URL of the SFU signalling endpoint.
   * @param {string} role - audio peer role (e.g. 'sendrecv', 'recvonly',
   *   'passive-sendrecv'); forwarded to the SFU in signalling messages.
   * @param {object} [options={}] - optional parameters, copied verbatim onto
   *   the instance (see list below).
   */
  constructor(
    wsUrl,
    role,
    options = {},
  ) {
    super(SFU_COMPONENT_NAME, wsUrl);
    this.role = role;
    // Offer by default; may be overridden to false via options, in which case
    // the SFU sends the offer and this client answers (see _join).
    this.offering = true;

    // Optional parameters are:
    // clientSessionNumber
    // iceServers,
    // offering,
    // mediaServer,
    // extension,
    // constraints,
    // stream,
    // signalCandidates
    // traceLogs
    // networkPriority
    // gatheringTimeout
    // transparentListenOnly
    Object.assign(this, options);
  }

  /**
   * Fetches the local MediaStream attached to the underlying WebRTC peer.
   *
   * @returns {MediaStream|null} the peer's local stream, or null when there is
   *   no peer (or it does not expose getLocalStream).
   */
  getLocalStream() {
    if (this.webRtcPeer && typeof this.webRtcPeer.getLocalStream === 'function') {
      return this.webRtcPeer.getLocalStream();
    }

    return null;
  }

  /**
   * Replaces the audio tracks currently being sent with the audio tracks from
   * the given stream, without renegotiation (RTCRtpSender.replaceTrack).
   * Replaced tracks are stopped and removed from the local MediaStream.
   *
   * @param {MediaStream} stream - stream whose audio tracks will be sent.
   * @returns {Promise<void>} resolves synchronously; kept as a Promise for
   *   interface compatibility with callers that chain on it.
   * @throws {Error} when there is no peer/peerConnection to operate on.
   */
  setLocalStream(stream) {
    if (this.webRtcPeer == null || this.webRtcPeer.peerConnection == null) {
      throw new Error('Missing peer connection');
    }

    const { peerConnection } = this.webRtcPeer;
    const newTracks = stream.getAudioTracks();
    const localStream = this.getLocalStream();
    const oldTracks = localStream ? localStream.getAudioTracks() : [];

    peerConnection.getSenders().forEach((sender) => {
      // Audio senders only; a sender with a null track is considered free to
      // take a new audio track (e.g. passive-sendrecv peers that started
      // without an input device).
      if (sender.track == null || sender?.track?.kind === 'audio') {
        const newTrack = newTracks.shift();
        if (newTrack == null) return;

        // Cleanup old tracks in the local MediaStream
        const oldTrack = oldTracks.shift();
        sender.replaceTrack(newTrack);
        if (oldTrack) {
          oldTrack.stop();
          localStream.removeTrack(oldTrack);
        }
        localStream.addTrack(newTrack);
      }
    });

    // Stop and remove any leftover old tracks that had no matching sender.
    if (oldTracks.length > 0) {
      oldTracks.forEach((track) => {
        track.stop();
        localStream.removeTrack(track);
      });
    }

    return Promise.resolve();
  }

  /**
   * Creates the WebRTC peer and kicks off negotiation with the SFU.
   * Resolves once peer creation succeeds (NOT when audio is flowing — success
   * is signalled later via the 'webRTCAudioSuccess' websocket message).
   *
   * @returns {Promise<void>}
   */
  _join() {
    return new Promise((resolve, reject) => {
      try {
        const options = {
          audioStream: this.stream,
          mediaConstraints: {
            audio: this.constraints ? this.constraints : true,
            video: false,
          },
          configuration: this.populatePeerConfiguration(),
          // Candidate signalling may be disabled (e.g. when the server
          // gathers/relays candidates itself).
          onicecandidate: !this.signalCandidates ? null : (candidate) => {
            this.onIceCandidate(candidate, this.role);
          },
          trace: this.traceLogs,
          networkPriorities: this.networkPriority ? { audio: this.networkPriority } : undefined,
          mediaStreamFactory: this.mediaStreamFactory,
          gatheringTimeout: this.gatheringTimeout,
        };

        const peerRole = BaseBroker.getPeerRole(this.role);
        this.webRtcPeer = new WebRtcPeer(peerRole, options);

        // Expose peers for debugging purposes.
        window.peers = window.peers || [];
        window.peers.push(this.webRtcPeer);

        this.webRtcPeer.iceQueue = [];
        this.webRtcPeer.start();
        this.webRtcPeer.peerConnection.onconnectionstatechange = this.handleConnectionStateChange.bind(this);

        if (this.offering) {
          // We are the offerer
          this.webRtcPeer.generateOffer()
            .then(this.sendStartReq.bind(this))
            .catch(this._handleOfferGenerationFailure.bind(this));
        } else if (peerRole === 'recvonly'
          || peerRole === 'recv'
          || peerRole === 'passive-sendrecv') {
          // We are the answerer and we are only listening, so we don't need
          // to acquire local media
          this.sendStartReq();
        } else {
          // We are the answerer and we are sending audio, so we need to acquire
          // local media before sending the start request
          this.webRtcPeer.mediaStreamFactory()
            .then(() => { this.sendStartReq(); })
            .catch(this._handleOfferGenerationFailure.bind(this));
        }

        resolve();
      } catch (error) {
        // 1305: "PEER_NEGOTIATION_FAILED",
        const normalizedError = BaseBroker.assembleError(1305);
        logger.error({
          logCode: `${this.logCodePrefix}_peer_creation_failed`,
          extraInfo: {
            errorMessage: error.name || error.message || 'Unknown error',
            errorCode: normalizedError.errorCode,
            sfuComponent: this.sfuComponent,
            started: this.started,
          },
        }, 'Audio peer creation failed');
        this.onerror(normalizedError);
        reject(normalizedError);
      }
    });
  }

  /**
   * Opens the signalling websocket, then joins the audio session.
   * @returns {Promise<void>}
   */
  joinAudio() {
    return this.openWSConnection()
      .then(this._join.bind(this));
  }

  /**
   * Dispatches incoming SFU websocket messages by their `id` field.
   * @param {MessageEvent} message - raw websocket message (JSON payload).
   */
  onWSMessage(message) {
    const parsedMessage = JSON.parse(message.data);

    switch (parsedMessage.id) {
      case 'startResponse':
        this.onRemoteDescriptionReceived(parsedMessage);
        break;
      case 'iceCandidate':
        this.handleIceCandidate(parsedMessage.candidate);
        break;
      case 'webRTCAudioSuccess':
        this.onstart(parsedMessage.success);
        this.started = true;
        break;
      case 'webRTCAudioError':
      case 'error':
        this.handleSFUError(parsedMessage);
        break;
      case 'pong':
        // Keepalive response; nothing to do.
        break;
      default:
        logger.debug({
          logCode: `${this.logCodePrefix}_invalid_req`,
          extraInfo: { messageId: parsedMessage.id || 'Unknown', sfuComponent: this.sfuComponent },
        }, 'Discarded invalid SFU message');
    }
  }

  /**
   * Normalizes an SFU-side error payload, logs it, and forwards it to the
   * broker's onerror handler.
   * @param {{ code: number, reason: string, role: string }} sfuResponse
   */
  handleSFUError(sfuResponse) {
    const { code, reason, role } = sfuResponse;
    const error = BaseBroker.assembleError(code, reason);

    logger.error({
      logCode: `${this.logCodePrefix}_sfu_error`,
      extraInfo: {
        errorCode: code,
        errorMessage: error.errorMessage,
        role,
        sfuComponent: this.sfuComponent,
        started: this.started,
      },
    }, 'Audio failed in SFU');
    this.onerror(error);
  }

  /**
   * Sends the local SDP (as an answer) back to the SFU. Used when the SFU is
   * the offerer (this.offering === false).
   * @param {string} localDescription - local SDP answer.
   */
  sendLocalDescription(localDescription) {
    const message = {
      id: SUBSCRIBER_ANSWER,
      type: this.sfuComponent,
      role: this.role,
      sdpOffer: localDescription,
    };

    this.sendMessage(message);
  }

  /**
   * Handles the SFU 'startResponse': processes it as an answer when we
   * offered, or as an offer when the SFU is the offerer.
   * @param {object} sfuResponse - parsed 'startResponse' message.
   */
  onRemoteDescriptionReceived(sfuResponse) {
    if (this.offering) {
      return this.processAnswer(sfuResponse);
    }

    return this.processOffer(sfuResponse);
  }

  /**
   * Sends the 'start' request that initiates the audio session in the SFU.
   * @param {string} [offer] - local SDP offer; undefined when the SFU offers.
   */
  sendStartReq(offer) {
    const message = {
      id: 'start',
      type: this.sfuComponent,
      role: this.role,
      clientSessionNumber: this.clientSessionNumber,
      sdpOffer: offer,
      mediaServer: this.mediaServer,
      extension: this.extension,
      transparentListenOnly: this.transparentListenOnly,
    };

    logger.debug({
      logCode: `${this.logCodePrefix}_offer_generated`,
      extraInfo: { sfuComponent: this.sfuComponent, role: this.role },
    }, 'SFU audio offer generated');

    this.sendMessage(message);
  }

  /**
   * Logs local offer/media acquisition failures and forwards the raw error to
   * the broker's onerror handler. No-op when called without an error.
   * @param {Error} error
   */
  _handleOfferGenerationFailure(error) {
    if (error) {
      logger.error({
        logCode: `${this.logCodePrefix}_offer_failure`,
        extraInfo: {
          errorMessage: error.name || error.message || 'Unknown error',
          sfuComponent: this.sfuComponent,
        },
      }, 'Audio offer generation failed');
      // 1305: "PEER_NEGOTIATION_FAILED",
      this.onerror(error);
    }
  }

  /**
   * Sends DTMF tones through the SFU signalling channel.
   * @param {string} tones - DTMF tone string to relay.
   */
  dtmf(tones) {
    const message = {
      id: DTMF,
      type: this.sfuComponent,
      tones,
    };

    this.sendMessage(message);
  }

  /**
   * Relays a locally gathered ICE candidate to the SFU.
   * @param {RTCIceCandidate} candidate
   * @param {string} role - peer role the candidate belongs to.
   */
  onIceCandidate(candidate, role) {
    const message = {
      id: ON_ICE_CANDIDATE_MSG,
      role,
      type: this.sfuComponent,
      candidate,
    };

    this.sendMessage(message);
  }
}
export default AudioBroker;