feat: Initial implementation of Gladia transcriptions to BBB 2.7 (#19091)
* Demo changes
* Revert "feat(captions): no longer writes in the pad"
This reverts commit a76de8c458.
* feat(transcription): Add config options for the transcription backend
* feat(transcription): Add autodetect option to cc chevron
* feat(transcription): Move transcription options into settings modal
* feat(transcription): Set transcription options via userdata
* fix(transcription): Correct userdata for settings transcription params
* feat(transcriptions): options to auto enable caption button
* feat(transcriptions): Option to hide old CC pad functionality
* fix(transcription): Fix PR comments
* fix(transcription): Refactor updateTranscript to prevent null user and make it more readable
* feat(transcription): bbb_transcription_provider can be set via userdata
* fix(transcription): Use base10 for parseInt
* fix(transcriptions): Fix CC language divider when using webspeech
* fix(transcriptions): Use a default pad in the settings instead of hardcoding 'en'
We still need to use a language pad such as 'en', but in the future we can better
separate these systems.
* fix(transcription): Add a special permission for automatic transcription updates to the pad and restore old per user updates permission
* feature(transcriptions): Include transcriptions submenu and locales
* chore: bump bbb-transcription-controller to v0.2.0
* fix(transcription): Add missing menu files
* fix(transcription): Fix transcription provider options in settings.yml
* fix: setting password for bbb-transcription-controller
* build: add gladia-proxy.log for transcription-controller
* fix(transcriptions): Remove transcript splitting and floor logic from akka apps
* fix(captions): Show long utterances as split captions, show multiple speaker captions
* chore: bump bbb-transcription-controller to 0.2.1
---------
Co-authored-by: Anton Georgiev <anto.georgiev@gmail.com>
Parent: 2a38249ddf
Commit: 125d70699b
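The three bbb_transcription_* userdata values registered in the client (see the currentParameters hunk below) are read with getFromUserSettings and copied into Settings.transcription on load. A minimal sketch of how a join request could carry them, assuming the standard "userdata-" prefix for custom join parameters; the URL, meeting and checksum handling are placeholders and not part of this change:

    // Hypothetical join-parameter sketch: custom userdata is passed with the
    // standard "userdata-" prefix and later surfaced via getFromUserSettings().
    const joinParams = new URLSearchParams({
      'userdata-bbb_transcription_partial_utterances': 'true',
      'userdata-bbb_transcription_min_utterance_length': '3',
      'userdata-bbb_transcription_provider': 'gladia',
    });
    // e.g. appended to a /bigbluebutton/api/join?... request built elsewhere (checksum omitted).
    console.log(joinParams.toString());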
@@ -21,7 +21,7 @@ trait PadUpdatePubMsgHdlr {
       bus.outGW.send(msgEvent)
     }

-    if (Pads.hasAccess(liveMeeting, msg.body.externalId, msg.header.userId)) {
+    if (Pads.hasAccess(liveMeeting, msg.body.externalId, msg.header.userId) || msg.body.transcript == true) {
       Pads.getGroup(liveMeeting.pads, msg.body.externalId) match {
         case Some(group) => broadcastEvent(group.groupId, msg.body.externalId, msg.body.text)
         case _ =>
@@ -0,0 +1,41 @@
+package org.bigbluebutton.core.apps.users
+
+import org.bigbluebutton.common2.msgs._
+import org.bigbluebutton.core.models.{ UserState, Users2x }
+import org.bigbluebutton.core.running.{ LiveMeeting, OutMsgRouter }
+import org.bigbluebutton.core.apps.{ PermissionCheck, RightsManagementTrait }
+import org.bigbluebutton.core.domain.MeetingState2x
+
+trait SetUserSpeechOptionsMsgHdlr extends RightsManagementTrait {
+  this: UsersApp =>
+
+  val liveMeeting: LiveMeeting
+  val outGW: OutMsgRouter
+
+  def handleSetUserSpeechOptionsReqMsg(msg: SetUserSpeechOptionsReqMsg): Unit = {
+    log.info("handleSetUserSpeechOptionsReqMsg: partialUtterances={} minUtteranceLength={} userId={}", msg.body.partialUtterances, msg.body.minUtteranceLength, msg.header.userId)
+
+    def broadcastUserSpeechOptionsChanged(user: UserState, partialUtterances: Boolean, minUtteranceLength: Int): Unit = {
+      val routingChange = Routing.addMsgToClientRouting(
+        MessageTypes.BROADCAST_TO_MEETING,
+        liveMeeting.props.meetingProp.intId, user.intId
+      )
+      val envelopeChange = BbbCoreEnvelope(UserSpeechOptionsChangedEvtMsg.NAME, routingChange)
+      val headerChange = BbbClientMsgHeader(UserSpeechOptionsChangedEvtMsg.NAME, liveMeeting.props.meetingProp.intId, user.intId)
+
+      val bodyChange = UserSpeechOptionsChangedEvtMsgBody(partialUtterances, minUtteranceLength)
+      val eventChange = UserSpeechOptionsChangedEvtMsg(headerChange, bodyChange)
+      val msgEventChange = BbbCommonEnvCoreMsg(envelopeChange, eventChange)
+      outGW.send(msgEventChange)
+    }
+
+    for {
+      user <- Users2x.findWithIntId(liveMeeting.users2x, msg.header.userId)
+    } yield {
+      var changeLocale: Option[UserState] = None;
+      //changeLocale = Users2x.setUserSpeechLocale(liveMeeting.users2x, msg.header.userId, msg.body.locale)
+      broadcastUserSpeechOptionsChanged(user, msg.body.partialUtterances, msg.body.minUtteranceLength)
+    }
+
+  }
+}
@@ -150,6 +150,7 @@ class UsersApp(
   with RegisterUserReqMsgHdlr
   with ChangeUserRoleCmdMsgHdlr
   with SetUserSpeechLocaleMsgHdlr
+  with SetUserSpeechOptionsMsgHdlr
   with SyncGetUsersMeetingRespMsgHdlr
   with LogoutAndEndMeetingCmdMsgHdlr
   with SetRecordingStatusCmdMsgHdlr
@@ -7,12 +7,10 @@ import org.bigbluebutton.SystemConfiguration
 object AudioCaptions extends SystemConfiguration {
   def setFloor(audioCaptions: AudioCaptions, userId: String) = audioCaptions.floor = userId

-  def isFloor(audioCaptions: AudioCaptions, userId: String) = audioCaptions.floor == userId
+  def isFloor(audioCaptions: AudioCaptions, userId: String) = true

   def parseTranscript(transcript: String): String = {
-    val words = transcript.split("\\s+") // Split on whitespaces
-    val lines = words.grouped(transcriptWords).toArray // Group each X words into lines
-    lines.takeRight(transcriptLines).map(l => l.mkString(" ")).mkString("\n") // Join the last X lines
+    transcript
   }

   /*
@@ -113,6 +113,8 @@ class ReceivedJsonMsgHandlerActor(
         routeGenericMsg[ChangeUserMobileFlagReqMsg](envelope, jsonNode)
       case SetUserSpeechLocaleReqMsg.NAME =>
         routeGenericMsg[SetUserSpeechLocaleReqMsg](envelope, jsonNode)
+      case SetUserSpeechOptionsReqMsg.NAME =>
+        routeGenericMsg[SetUserSpeechOptionsReqMsg](envelope, jsonNode)
       case SelectRandomViewerReqMsg.NAME =>
         routeGenericMsg[SelectRandomViewerReqMsg](envelope, jsonNode)
@@ -399,6 +399,7 @@ class MeetingActor(
       case m: ChangeUserPinStateReqMsg => usersApp.handleChangeUserPinStateReqMsg(m)
       case m: ChangeUserMobileFlagReqMsg => usersApp.handleChangeUserMobileFlagReqMsg(m)
       case m: SetUserSpeechLocaleReqMsg => usersApp.handleSetUserSpeechLocaleReqMsg(m)
+      case m: SetUserSpeechOptionsReqMsg => usersApp.handleSetUserSpeechOptionsReqMsg(m)

       // Client requested to eject user
       case m: EjectUserFromMeetingCmdMsg =>
@@ -107,7 +107,7 @@ case class PadTailEvtMsgBody(externalId: String, tail: String)
 // client -> apps
 object PadUpdatePubMsg { val NAME = "PadUpdatePubMsg" }
 case class PadUpdatePubMsg(header: BbbClientMsgHeader, body: PadUpdatePubMsgBody) extends StandardMsg
-case class PadUpdatePubMsgBody(externalId: String, text: String)
+case class PadUpdatePubMsgBody(externalId: String, text: String, transcript: Boolean)

 // apps -> pads
 object PadUpdateCmdMsg { val NAME = "PadUpdateCmdMsg" }
@@ -531,3 +531,11 @@ case class SetUserSpeechLocaleReqMsgBody(locale: String, provider: String)
 object UserSpeechLocaleChangedEvtMsg { val NAME = "UserSpeechLocaleChangedEvtMsg" }
 case class UserSpeechLocaleChangedEvtMsg(header: BbbClientMsgHeader, body: UserSpeechLocaleChangedEvtMsgBody) extends BbbCoreMsg
 case class UserSpeechLocaleChangedEvtMsgBody(locale: String, provider: String)
+
+object SetUserSpeechOptionsReqMsg { val NAME = "SetUserSpeechOptionsReqMsg" }
+case class SetUserSpeechOptionsReqMsg(header: BbbClientMsgHeader, body: SetUserSpeechOptionsReqMsgBody) extends StandardMsg
+case class SetUserSpeechOptionsReqMsgBody(partialUtterances: Boolean, minUtteranceLength: Int)
+
+object UserSpeechOptionsChangedEvtMsg { val NAME = "UserSpeechOptionsChangedEvtMsg" }
+case class UserSpeechOptionsChangedEvtMsg(header: BbbClientMsgHeader, body: UserSpeechOptionsChangedEvtMsgBody) extends BbbCoreMsg
+case class UserSpeechOptionsChangedEvtMsgBody(partialUtterances: Boolean, minUtteranceLength: Int)
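On the wire, the new request follows the same envelope/core JSON layout as the other client-to-apps messages routed through Redis; the envelope and routing keys below are assumptions from that convention, while the message name and body fields come from the case classes above:

    // Hypothetical on-the-wire shape of SetUserSpeechOptionsReqMsg, assuming the
    // usual BbbCommonEnvCoreMsg JSON envelope; meetingId/userId values are placeholders.
    const setUserSpeechOptionsReqMsg = {
      envelope: {
        name: 'SetUserSpeechOptionsReqMsg',
        routing: { meetingId: 'meeting-1', userId: 'w_abc123' }, // routing keys assumed
      },
      core: {
        header: {
          name: 'SetUserSpeechOptionsReqMsg',
          meetingId: 'meeting-1',
          userId: 'w_abc123',
        },
        body: {
          partialUtterances: true, // matches SetUserSpeechOptionsReqMsgBody
          minUtteranceLength: 3,
        },
      },
    };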
@@ -1 +1 @@
-git clone --branch v0.1.0 --depth 1 https://github.com/bigbluebutton/bbb-transcription-controller bbb-transcription-controller
+git clone --branch v0.2.1 --depth 1 https://github.com/bigbluebutton/bbb-transcription-controller bbb-transcription-controller
@@ -1729,7 +1729,7 @@ if [ -n "$HOST" ]; then
     sudo yq w -i /usr/local/bigbluebutton/bbb-webrtc-sfu/config/default.yml freeswitch.esl_password "$ESL_PASSWORD"
     sudo xmlstarlet edit --inplace --update 'configuration/settings//param[@name="password"]/@value' --value $ESL_PASSWORD /opt/freeswitch/etc/freeswitch/autoload_configs/event_socket.conf.xml
     if [ -f /usr/local/bigbluebutton/bbb-transcription-controller/config/default.yml ]; then
-      sudo yq w -i /usr/local/bigbluebutton/bbb-transcription-controller/config/default.yml freeswitch.esl_password "$ESL_PASSWORD"
+      sudo yq w -i /usr/local/bigbluebutton/bbb-transcription-controller/config/default.yml freeswitch.password "$ESL_PASSWORD"
     fi

     echo "Restarting BigBlueButton $BIGBLUEBUTTON_RELEASE ..."
@@ -1,12 +1,38 @@
 import setTranscript from '/imports/api/audio-captions/server/modifiers/setTranscript';
+import updateTranscriptPad from '/imports/api/pads/server/methods/updateTranscriptPad';
+import Users from '/imports/api/users';
+
+const TRANSCRIPTION_DEFAULT_PAD = Meteor.settings.public.captions.defaultPad;
+
+const formatDate = (dStr) => {
+  return ("00" + dStr).substr(-2,2);
+};

 export default async function transcriptUpdated({ header, body }) {
-  const { meetingId } = header;
+  const {
+    meetingId,
+    userId,
+  } = header;

   const {
     transcriptId,
     transcript,
+    locale,
+    result,
   } = body;

-  await setTranscript(meetingId, transcriptId, transcript);
+  if (result) {
+    const user = Users.findOne({ userId }, { fields: { name: 1 } });
+    const userName = user?.name || '??';
+
+    const dt = new Date(Date.now());
+    const hours = formatDate(dt.getHours()),
+      minutes = formatDate(dt.getMinutes()),
+      seconds = formatDate(dt.getSeconds());
+
+    const userSpoke = `\n ${userName} (${hours}:${minutes}:${seconds}): ${transcript}`;
+    updateTranscriptPad(meetingId, userId, TRANSCRIPTION_DEFAULT_PAD, userSpoke);
+  }
+
+  await setTranscript(userId, meetingId, transcriptId, transcript, locale);
 }
@@ -1,28 +1,30 @@
 import { check } from 'meteor/check';
 import AudioCaptions from '/imports/api/audio-captions';
 import Users from '/imports/api/users';
 import Logger from '/imports/startup/server/logger';

-export default async function setTranscript(meetingId, transcriptId, transcript) {
+export default async function setTranscript(userId, meetingId, transcriptId, transcript, locale) {
   try {
     check(meetingId, String);
     check(transcriptId, String);
     check(transcript, String);

-    const selector = { meetingId };
+    const selector = { meetingId, transcriptId };

     const modifier = {
       $set: {
         transcriptId,
         transcript,
+        lastUpdated: Math.floor(new Date().getTime()/1000),
+        locale,
       },
     };

     const numberAffected = await AudioCaptions.upsertAsync(selector, modifier);

     if (numberAffected) {
-      Logger.debug(`Set transcriptId=${transcriptId} transcript=${transcript} meeting=${meetingId}`);
+      Logger.debug(`Set transcriptId=${transcriptId} transcript=${transcript} meeting=${meetingId} locale=${locale}`);
     } else {
-      Logger.debug(`Upserted transcriptId=${transcriptId} transcript=${transcript} meeting=${meetingId}`);
+      Logger.debug(`Upserted transcriptId=${transcriptId} transcript=${transcript} meeting=${meetingId} locale=${locale}`);
     }
   } catch (err) {
     Logger.error(`Setting audio captions transcript to the collection: ${err}`);
@@ -17,6 +17,7 @@ export default function updatePad(meetingId, userId, externalId, text) {
     const payload = {
       externalId,
       text,
+      transcript: false,
     };

     RedisPubSub.publishUserMessage(CHANNEL, EVENT_NAME, meetingId, userId, payload);
@@ -0,0 +1,29 @@
+import RedisPubSub from '/imports/startup/server/redis';
+import { Meteor } from 'meteor/meteor';
+import { check } from 'meteor/check';
+import Logger from '/imports/startup/server/logger';
+
+export default function updateTranscriptPad(meetingId, userId, externalId, text) {
+  const REDIS_CONFIG = Meteor.settings.private.redis;
+  const CHANNEL = REDIS_CONFIG.channels.toAkkaApps;
+  const EVENT_NAME = 'PadUpdatePubMsg';
+
+  try {
+    check(meetingId, String);
+    check(userId, String);
+    check(externalId, String);
+    check(text, String);
+
+    // Send a special boolean denoting this was updated by the transcript system
+    // this way we can write it in the 'presenter' pad and still block manual updates by viewers
+    const payload = {
+      externalId,
+      text,
+      transcript: true,
+    };
+
+    RedisPubSub.publishUserMessage(CHANNEL, EVENT_NAME, meetingId, userId, payload);
+  } catch (err) {
+    Logger.error(`Exception while invoking method updateTranscriptPad ${err.stack}`);
+  }
+}
@@ -65,6 +65,10 @@ const currentParameters = [
   'bbb_hide_nav_bar',
   'bbb_change_layout',
   'bbb_direct_leave_button',
+  // TRANSCRIPTION
+  'bbb_transcription_partial_utterances',
+  'bbb_transcription_min_utterance_length',
+  'bbb_transcription_provider',
 ];

 function valueParser(val) {
@@ -1,6 +1,7 @@
 import { Meteor } from 'meteor/meteor';
 import validateAuthToken from './methods/validateAuthToken';
 import setSpeechLocale from './methods/setSpeechLocale';
+import setSpeechOptions from './methods/setSpeechOptions';
 import setMobileUser from './methods/setMobileUser';
 import setEmojiStatus from './methods/setEmojiStatus';
 import changeAway from './methods/changeAway';
@@ -19,6 +20,7 @@ import clearAllUsersEmoji from './methods/clearAllUsersEmoji';

 Meteor.methods({
   setSpeechLocale,
+  setSpeechOptions,
   setMobileUser,
   setEmojiStatus,
   clearAllUsersEmoji,
@@ -23,7 +23,7 @@ export default function setSpeechLocale(locale, provider) {
       provider: provider !== 'webspeech' ? provider : '',
     };

-    if (LANGUAGES.includes(locale) || locale === '') {
+    if (LANGUAGES.includes(locale) || locale === '' || locale === 'auto') {
       RedisPubSub.publishUserMessage(CHANNEL, EVENT_NAME, meetingId, requesterUserId, payload);
     }
   } catch (err) {
@@ -0,0 +1,30 @@
+import { check } from 'meteor/check';
+import Logger from '/imports/startup/server/logger';
+import RedisPubSub from '/imports/startup/server/redis';
+import { extractCredentials } from '/imports/api/common/server/helpers';
+
+export default async function setSpeechOptions(partialUtterances, minUtteranceLength) {
+  try {
+    const { meetingId, requesterUserId } = extractCredentials(this.userId);
+
+    const REDIS_CONFIG = Meteor.settings.private.redis;
+    const CHANNEL = REDIS_CONFIG.channels.toAkkaApps;
+    const EVENT_NAME = 'SetUserSpeechOptionsReqMsg';
+
+    Logger.info(`Setting speech options for ${meetingId} ${requesterUserId} ${partialUtterances} ${minUtteranceLength}`);
+
+    check(meetingId, String);
+    check(requesterUserId, String);
+    check(partialUtterances, Boolean);
+    check(minUtteranceLength, Number);
+
+    const payload = {
+      partialUtterances,
+      minUtteranceLength,
+    };
+
+    RedisPubSub.publishUserMessage(CHANNEL, EVENT_NAME, meetingId, requesterUserId, payload);
+  } catch (e) {
+    Logger.error(e);
+  }
+}
@@ -26,6 +26,7 @@ import BBBStorage from '/imports/ui/services/storage';
 const CHAT_CONFIG = Meteor.settings.public.chat;
 const PUBLIC_CHAT_ID = CHAT_CONFIG.public_id;
 const USER_WAS_EJECTED = 'userWasEjected';
+const CAPTIONS_ALWAYS_VISIBLE = Meteor.settings.public.app.audioCaptions.alwaysVisible;

 const HTML = document.getElementsByTagName('html')[0];

@@ -98,6 +99,7 @@ class Base extends Component {
     fullscreenChangedEvents.forEach((event) => {
       document.addEventListener(event, this.handleFullscreenChange);
     });
+    Session.set('audioCaptions', CAPTIONS_ALWAYS_VISIBLE);
     Session.set('isFullscreen', false);
   }

@@ -52,6 +52,7 @@ import NotesContainer from '/imports/ui/components/notes/container';
 import DEFAULT_VALUES from '../layout/defaultValues';
 import AppService from '/imports/ui/components/app/service';
 import TimerService from '/imports/ui/components/timer/service';
+import SpeechService from '/imports/ui/components/audio/captions/speech/service';

 const MOBILE_MEDIA = 'only screen and (max-width: 40em)';
 const APP_CONFIG = Meteor.settings.public.app;
@@ -171,6 +172,7 @@ class App extends Component {
       intl,
       layoutContextDispatch,
       isRTL,
+      transcriptionSettings,
     } = this.props;
     const { browserName } = browserInfo;
     const { osName } = deviceInfo;
@@ -232,6 +234,18 @@
         TimerService.OFFSET_INTERVAL);
     }

+    if (transcriptionSettings) {
+      const { partialUtterances, minUtteranceLength } = transcriptionSettings;
+      if (partialUtterances !== undefined || minUtteranceLength !== undefined) {
+        logger.info({ logCode: 'app_component_set_speech_options' }, 'Setting initial speech options');
+
+        Settings.transcription.partialUtterances = partialUtterances ? true : false;
+        Settings.transcription.minUtteranceLength = parseInt(minUtteranceLength, 10);
+
+        SpeechService.setSpeechOptions(Settings.transcription.partialUtterances, Settings.transcription.minUtteranceLength);
+      }
+    }
+
     logger.info({ logCode: 'app_component_componentdidmount' }, 'Client loaded successfully');
   }

@@ -3,7 +3,7 @@ import { withTracker } from 'meteor/react-meteor-data';
 import Auth from '/imports/ui/services/auth';
 import Users from '/imports/api/users';
 import Meetings, { LayoutMeetings } from '/imports/api/meetings';
-import AudioCaptionsLiveContainer from '/imports/ui/components/audio/captions/live/container';
+import AudioCaptionsLiveContainer from '/imports/ui/components/audio/captions/history/container';
 import AudioCaptionsService from '/imports/ui/components/audio/captions/service';
 import { notify } from '/imports/ui/services/notification';
 import CaptionsContainer from '/imports/ui/components/captions/live/container';
@@ -281,6 +281,11 @@ export default withTracker(() => {

   const isPresenter = currentUser?.presenter;

+  const transcriptionSettings = {
+    partialUtterances: getFromUserSettings('bbb_transcription_partial_utterances'),
+    minUtteranceLength: getFromUserSettings('bbb_transcription_min_utterance_length'),
+  };
+
   return {
     captions: CaptionsService.isCaptionsActive() ? <CaptionsContainer /> : null,
     audioCaptions: AudioCaptionsService.getAudioCaptions() ? <AudioCaptionsLiveContainer /> : null,
@@ -325,5 +330,6 @@
     hideActionsBar: getFromUserSettings('bbb_hide_actions_bar', false),
     ignorePollNotifications: Session.get('ignorePollNotifications'),
     isSharedNotesPinned: MediaService.shouldShowSharedNotes(),
+    transcriptionSettings,
   };
 })(AppContainer);
@@ -3,9 +3,13 @@ import PropTypes from 'prop-types';
 import { defineMessages, injectIntl } from 'react-intl';
 import Service from '/imports/ui/components/audio/captions/service';
 import SpeechService from '/imports/ui/components/audio/captions/speech/service';
 import ServiceOldCaptions from '/imports/ui/components/captions/service';
 import ButtonEmoji from '/imports/ui/components/common/button/button-emoji/ButtonEmoji';
 import BBBMenu from '/imports/ui/components/common/menu/component';
 import Styled from './styles';
+import OldCaptionsService from '/imports/ui/components/captions/service';
+
+const TRANSCRIPTION_DEFAULT_PAD = Meteor.settings.public.captions.defaultPad;
+
 const intlMessages = defineMessages({
   start: {
@@ -34,6 +38,10 @@ const intlMessages = defineMessages({
     id: 'app.audio.captions.button.language',
     description: 'Audio speech recognition language label',
   },
+  autoDetect: {
+    id: 'app.audio.captions.button.autoDetect',
+    description: 'Audio speech recognition language auto detect',
+  },
   'de-DE': {
     id: 'app.audio.captions.select.de-DE',
     description: 'Audio speech recognition german language',
@@ -89,6 +97,14 @@ const CaptionsButton = ({
   isSupported,
   isVoiceUser,
 }) => {
+  const usePrevious = (value) => {
+    const ref = useRef();
+    useEffect(() => {
+      ref.current = value;
+    });
+    return ref.current;
+  }
+
   const isTranscriptionDisabled = () => (
     currentSpeechLocale === DISABLED
   );
@@ -104,7 +120,12 @@
     if (!isTranscriptionDisabled()) selectedLocale.current = getSelectedLocaleValue;
   }, [currentSpeechLocale]);

+  const prevEnabled = usePrevious(enabled);
+
   if (!enabled) return null;
+  if (!prevEnabled && enabled) {
+    OldCaptionsService.createCaptions(TRANSCRIPTION_DEFAULT_PAD);
+  }

   const shouldRenderChevron = isSupported && isVoiceUser;

@@ -117,7 +138,7 @@
       iconRight: selectedLocale.current === availableVoice ? 'check' : null,
       customStyles: (selectedLocale.current === availableVoice) && Styled.SelectedLabel,
       disabled: isTranscriptionDisabled(),
-      dividerTop: availableVoice === availableVoices[0],
+      dividerTop: !SpeechService.isGladia() && availableVoice === availableVoices[0],
       onClick: () => {
         selectedLocale.current = availableVoice;
         SpeechService.setSpeechLocale(selectedLocale.current);
@@ -126,6 +147,20 @@
     ))
   );

+  const autoLanguage = SpeechService.isGladia() ? {
+    icon: '',
+    label: intl.formatMessage(intlMessages.autoDetect),
+    key: 'auto',
+    iconRight: selectedLocale.current === 'auto' ? 'check' : null,
+    customStyles: (selectedLocale.current === 'auto') && Styled.SelectedLabel,
+    disabled: isTranscriptionDisabled(),
+    dividerTop: true,
+    onClick: () => {
+      selectedLocale.current = 'auto';
+      SpeechService.setSpeechLocale(selectedLocale.current);
+    },
+  } : undefined;
+
   const toggleTranscription = () => {
     SpeechService.setSpeechLocale(isTranscriptionDisabled() ? selectedLocale.current : DISABLED);
   };
@@ -138,6 +173,7 @@
         disabled: true,
         dividerTop: false,
       },
+      autoLanguage,
       ...getAvailableLocales(),
       {
         key: 'divider',
@@ -156,7 +192,7 @@
         disabled: false,
         dividerTop: true,
         onClick: toggleTranscription,
-      }]
+      }].filter((e) => e) // filter undefined elements because of 'autoLanguage'
     );

   const onToggleClick = (e) => {
@@ -0,0 +1,33 @@
+import React, { PureComponent } from 'react';
+import PropTypes from 'prop-types';
+import LiveCaptions from '../live/container';
+
+class CaptionsHistory extends PureComponent {
+  constructor(props) {
+    super(props);
+  }
+
+  componentDidUpdate(prevProps) {
+  }
+
+  componentWillUnmount() {
+  }
+
+  render() {
+    const { captions } = this.props;
+
+    let i = 0;
+    return captions.map((c) => {
+      i += 1;
+      return <LiveCaptions
+        key={captions.length - i}
+        index={captions.length - i}
+        nCaptions={captions.length}
+        transcriptId={c.transcriptId}
+        transcript={c.transcript}
+      />
+    });
+  }
+}
+
+export default CaptionsHistory;
@@ -0,0 +1,18 @@
+import React from 'react';
+import { withTracker } from 'meteor/react-meteor-data';
+import Service from '/imports/ui/components/audio/captions/service';
+import CaptionsHistory from './component';
+
+const Container = (props) => <CaptionsHistory {...props} />;
+
+export default withTracker(() => {
+  const captions = Service.getAudioCaptionsData();
+
+  const lastCaption = captions?.length ? captions[captions.length-1] : {};
+
+  return {
+    captions,
+    lastTranscript: lastCaption?.transcript,
+    lastTranscriptId: lastCaption?.transcriptId,
+  };
+})(Container);
@@ -14,6 +14,7 @@ class LiveCaptions extends PureComponent {

   componentDidUpdate(prevProps) {
     const { clear } = this.state;
+    const { index, nCaptions } = this.props;

     if (clear) {
       const { transcript } = this.props;
@@ -23,7 +24,7 @@
       }
     } else {
       this.resetTimer();
-      this.timer = setTimeout(() => this.setState({ clear: true }), CAPTIONS_CONFIG.time);
+      this.timer = setTimeout(() => this.setState({ clear: true }), (CAPTIONS_CONFIG.time / nCaptions) * (index+1));
     }
   }

@@ -42,6 +43,8 @@
     const {
       transcript,
      transcriptId,
+      index,
+      nCaptions,
     } = this.props;

     const { clear } = this.state;
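The timeout change above staggers how long each caption in the stack stays visible: with the default captions.time of 5000 ms and three captions on screen, the computed clear delays land at roughly 1667, 3333 and 5000 ms, so a caption rendered with a higher index keeps its text longer. A small standalone illustration of the arithmetic:

    // Standalone sketch of the staggered clear timeout introduced above.
    // `time` is the settings.yml captions.time value; `index` is 0..nCaptions-1.
    const clearDelayMs = (time, nCaptions, index) => (time / nCaptions) * (index + 1);

    const time = 5000;   // settings.yml default
    const nCaptions = 3; // settings.yml captionLimit default
    [0, 1, 2].map((index) => Math.round(clearDelayMs(time, nCaptions, index)));
    // => [1667, 3333, 5000]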
@@ -5,14 +5,6 @@ import LiveCaptions from './component';

 const Container = (props) => <LiveCaptions {...props} />;

-export default withTracker(() => {
-  const {
-    transcriptId,
-    transcript,
-  } = Service.getAudioCaptionsData();
-
-  return {
-    transcript,
-    transcriptId,
-  };
+export default withTracker((props) => {
+  return props;
 })(Container);
@@ -12,6 +12,10 @@ const intlMessages = defineMessages({
     id: 'app.audio.captions.speech.disabled',
     description: 'Audio speech recognition disabled',
   },
+  auto: {
+    id: 'app.audio.captions.speech.auto',
+    description: 'Audio speech recognition auto',
+  },
   unsupported: {
     id: 'app.audio.captions.speech.unsupported',
     description: 'Audio speech recognition unsupported',
@@ -104,6 +108,15 @@ const Select = ({
      >
        {intl.formatMessage(intlMessages.disabled)}
      </option>
+      {SpeechService.isGladia() ?
+        <option
+          key="auto"
+          value="auto"
+        >
+          {intl.formatMessage(intlMessages.auto)}
+        </option>
+        : null
+      }
      {voices.map((v) => (
        <option
          key={v}
@@ -1,20 +1,53 @@
 import AudioCaptions from '/imports/api/audio-captions';
 import Auth from '/imports/ui/services/auth';

-const getAudioCaptionsData = () => {
-  const audioCaptions = AudioCaptions.findOne({ meetingId: Auth.meetingID });
+const CAPTIONS_CONFIG = Meteor.settings.public.captions;
+const CAPTIONS_ALWAYS_VISIBLE = Meteor.settings.public.app.audioCaptions.alwaysVisible;
+const CHARACTERS_PER_LINE = CAPTIONS_CONFIG.lineLimit;
+const LINES_PER_MESSAGE = CAPTIONS_CONFIG.line;
+const CAPTION_TIME = CAPTIONS_CONFIG.time;
+const CAPTION_LIMIT = CAPTIONS_CONFIG.captionLimit;

-  if (audioCaptions) {
-    return {
-      transcriptId: audioCaptions.transcriptId,
-      transcript: audioCaptions.transcript,
-    };
-  }
+function splitTranscript(obj) {
+  const transcripts = [];
+  const words = obj.transcript.split(' ');
+
+  let currentLine = '';
+  let result = '';
+
+  for (const word of words) {
+    if ((currentLine + word).length <= CHARACTERS_PER_LINE) {
+      currentLine += word + ' ';
+    } else {
+      result += currentLine.trim() + '\n';
+      currentLine = word + ' ';
+    }

-  return {
-    transcriptId: '',
-    transcript: '',
-  };
-};
+    if (result.split('\n').length > LINES_PER_MESSAGE) {
+      transcripts.push(result)
+      result = ''
+    }
+  }
+
+  transcripts.push(result)
+  transcripts.push(currentLine.trim())
+
+  return transcripts.map((t) => { return { ...obj, transcript: t} });
+}
+
+const getAudioCaptionsData = () => {
+  // the correct way woulde to use { limit: CAPTION_LIMIT } but something
+  // is up with this mongo query and it does not seem to work
+  let audioCaptions = AudioCaptions.find({ meetingId: Auth.meetingID}, { sort: { lastUpdate: -1 } }).fetch().slice(-CAPTION_LIMIT);
+
+  const recentEnough = (c) => (new Date().getTime()/1000 - c.lastUpdated) < CAPTIONS_CONFIG.time/1000;
+
+  audioCaptions = audioCaptions.filter(recentEnough).map((c) => {
+    const splits = splitTranscript(c);
+    return splits;
+  });
+
+  return audioCaptions.flat().filter((c) => c.transcript).slice(-CAPTION_LIMIT);
+};

 const getAudioCaptions = () => Session.get('audioCaptions') || false;
@@ -27,7 +60,7 @@ const hasAudioCaptions = () => {
     { fields: {} },
   );

-  return !!audioCaptions;
+  return CAPTIONS_ALWAYS_VISIBLE || !!audioCaptions;
 };

 export default {
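With the akka-apps parseTranscript logic removed (see the AudioCaptions.scala hunk above), long utterances are now wrapped on the client by splitTranscript, driven by the new captions.lineLimit and captions.lines settings. A standalone illustration of that wrapping, with the defaults lineLimit: 60 and lines: 2 and an invented sample utterance:

    // Standalone illustration (not part of the commit): the same word-wrapping idea
    // as splitTranscript above, with the settings.yml defaults lineLimit=60, lines=2.
    const CHARACTERS_PER_LINE = 60;
    const LINES_PER_MESSAGE = 2;

    const wrap = (transcript) => {
      const chunks = [];
      let line = '';
      let block = '';
      for (const word of transcript.split(' ')) {
        if ((line + word).length <= CHARACTERS_PER_LINE) {
          line += `${word} `;
        } else {
          block += `${line.trim()}\n`;
          line = `${word} `;
        }
        if (block.split('\n').length > LINES_PER_MESSAGE) {
          chunks.push(block);
          block = '';
        }
      }
      chunks.push(block);
      chunks.push(line.trim());
      return chunks.filter((c) => c);
    };

    // A long utterance becomes several caption entries, each at most two 60-character
    // lines, so the history container can render them as separate stacked captions.
    console.log(wrap('this is a made up utterance that is long enough to spill over a single sixty character caption line and therefore gets split'));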
@@ -8,12 +8,12 @@ import AudioService from '/imports/ui/components/audio/service';
 import deviceInfo from '/imports/utils/deviceInfo';
 import { isLiveTranscriptionEnabled } from '/imports/ui/services/features';
 import { unique, throttle } from 'radash';
+import getFromUserSettings from '/imports/ui/services/users-settings';

 const THROTTLE_TIMEOUT = 200;

 const CONFIG = Meteor.settings.public.app.audioCaptions;
 const ENABLED = CONFIG.enabled;
-const PROVIDER = CONFIG.provider;
 const LANGUAGES = CONFIG.language.available;
 const VALID_ENVIRONMENT = !deviceInfo.isMobile || CONFIG.mobile;

@@ -39,11 +39,19 @@ const getSpeechVoices = () => {
   return voices.filter((v) => LANGUAGES.includes(v));
 };

+const getSpeechProvider = () => {
+  return getFromUserSettings("bbb_transcription_provider", CONFIG.provider);
+};
+
+const setSpeechOptions = (partialUtterances, minUtteranceLength) => {
+  return makeCall('setSpeechOptions', partialUtterances, minUtteranceLength);
+};
+
 const setSpeechLocale = (value) => {
   const voices = getSpeechVoices();

-  if (voices.includes(value) || value === '') {
-    makeCall('setSpeechLocale', value, CONFIG.provider);
+  if (voices.includes(value) || value === '' || (value === 'auto' && isGladia())) {
+    makeCall('setSpeechLocale', value, getSpeechProvider());
   } else {
     logger.error({
       logCode: 'captions_speech_locale',
@@ -128,15 +136,17 @@ const isLocaleValid = (locale) => LANGUAGES.includes(locale);

 const isEnabled = () => isLiveTranscriptionEnabled();

-const isWebSpeechApi = () => PROVIDER === 'webspeech';
+const isWebSpeechApi = () => getSpeechProvider() === 'webspeech';

-const isVosk = () => PROVIDER === 'vosk';
+const isVosk = () => getSpeechProvider() === 'vosk';

-const isWhispering = () => PROVIDER === 'whisper';
+const isGladia = () => getSpeechProvider() === 'gladia';

-const isDeepSpeech = () => PROVIDER === 'deepSpeech'
+const isWhispering = () => getSpeechProvider() === 'whisper';

-const isActive = () => isEnabled() && ((isWebSpeechApi() && hasSpeechLocale()) || isVosk() || isWhispering() || isDeepSpeech());
+const isDeepSpeech = () => getSpeechProvider() === 'deepSpeech'
+
+const isActive = () => isEnabled() && ((isWebSpeechApi() && hasSpeechLocale()) || isVosk() || isGladia() || isWhispering() || isDeepSpeech());

 const getStatus = () => {
   const active = isActive();
@@ -163,7 +173,7 @@ const getLocale = () => {
   return locale;
 };

-const stereoUnsupported = () => isActive() && isVosk() && !!getSpeechLocale();
+const stereoUnsupported = () => isActive() && (isVosk() || isGladia()) && !!getSpeechLocale();

 export default {
   LANGUAGES,
@@ -174,6 +184,7 @@ export default {
   getSpeechVoices,
   getSpeechLocale,
   setSpeechLocale,
+  setSpeechOptions,
   hasSpeechLocale,
   isLocaleValid,
   isEnabled,
@@ -182,4 +193,5 @@
   generateId,
   useFixedLocale,
   stereoUnsupported,
+  isGladia,
 };
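getSpeechProvider() makes the provider a per-user decision: the bbb_transcription_provider userdata value, when present, overrides the settings.yml provider default. A compact sketch of that precedence (names are local to the sketch):

    // Standalone sketch of the provider precedence introduced above.
    // `userSettings` stands in for the values surfaced by getFromUserSettings().
    const resolveProvider = (userSettings, settingsYmlProvider) =>
      userSettings.bbb_transcription_provider ?? settingsYmlProvider;

    resolveProvider({ bbb_transcription_provider: 'gladia' }, 'webspeech'); // => 'gladia'
    resolveProvider({}, 'webspeech');                                       // => 'webspeech'
    // With 'gladia' resolved, isGladia() is true, the "auto" locale becomes selectable,
    // and setSpeechLocale('auto') is accepted by the server-side method as well.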
@@ -11,6 +11,10 @@ import browserInfo from '/imports/utils/browserInfo';
 import Header from '/imports/ui/components/common/control-header/component';

 const intlMessages = defineMessages({
+  title: {
+    id: 'app.captions.title',
+    description: 'Title for the pad header',
+  },
   hide: {
     id: 'app.captions.hide',
     description: 'Label for hiding closed captions',
@@ -67,6 +71,7 @@ const Captions = ({
   hasPermission,
   layoutContextDispatch,
   isResizing,
+  autoTranscription,
 }) => {
   const { isChrome } = browserInfo;

@@ -85,7 +90,7 @@
         });
       },
       'aria-label': intl.formatMessage(intlMessages.hide),
-      label: name,
+      label: autoTranscription ? intl.formatMessage(intlMessages.title) : name,
     }}
     customRightButton={Service.amICaptionsOwner(ownerId) ? (
       <span>
@@ -5,26 +5,13 @@ import Captions from './component';
 import Auth from '/imports/ui/services/auth';
 import { layoutSelectInput, layoutDispatch } from '../layout/context';
 import { ACTIONS, PANELS } from '/imports/ui/components/layout/enums';
+import SpeechService from '/imports/ui/components/audio/captions/speech/service';

 const Container = (props) => {
   const cameraDock = layoutSelectInput((i) => i.cameraDock);
   const { isResizing } = cameraDock;
   const layoutContextDispatch = layoutDispatch();

-  const { amIModerator } = props;
-
-  if (!amIModerator) {
-    layoutContextDispatch({
-      type: ACTIONS.SET_SIDEBAR_CONTENT_IS_OPEN,
-      value: false,
-    });
-    layoutContextDispatch({
-      type: ACTIONS.SET_SIDEBAR_CONTENT_PANEL,
-      value: PANELS.NONE,
-    });
-    return null;
-  }
-
   return <Captions {...{ layoutContextDispatch, isResizing, ...props }} />;
 };

@@ -47,5 +34,6 @@ export default withTracker(() => {
     isRTL,
     hasPermission: Service.hasPermission(),
     amIModerator: Service.amIModerator(),
+    autoTranscription: SpeechService.isEnabled(),
   };
 })(Container);
@@ -131,6 +131,10 @@ const setCaptionsActive = (locale) => Session.set('captionsActive', locale);
 const amICaptionsOwner = (ownerId) => ownerId === Auth.userID;

 const isCaptionsAvailable = () => {
+  if (!CAPTIONS_CONFIG.showButton) {
+    return false;
+  }
+
   if (isCaptionsEnabled()) {
     const ownedLocales = getOwnedLocales();

@@ -169,7 +173,7 @@ const getName = (locale) => {
     locale,
   });

-  return captions.name;
+  return captions?.name;
 };

 const createCaptions = (locale) => {
@@ -5,6 +5,7 @@ import WriterMenu from './component';
 import { layoutDispatch } from '../../layout/context';
 import Auth from '/imports/ui/services/auth';
 import { UsersContext } from '/imports/ui/components/components-data/users-context/context';
+import SpeechService from '/imports/ui/components/audio/captions/speech/service';

 const ROLE_MODERATOR = Meteor.settings.public.user.role_moderator;

@@ -14,9 +15,8 @@ const WriterMenuContainer = (props) => {
   const usingUsersContext = useContext(UsersContext);
   const { users } = usingUsersContext;
   const currentUser = users[Auth.meetingID][Auth.userID];
-  const amIModerator = currentUser.role === ROLE_MODERATOR;

-  return amIModerator && <WriterMenu {...{ layoutContextDispatch, ...props }} />;
+  return <WriterMenu {...{ layoutContextDispatch, ...props }} />;
 };

 export default withTracker(({ setIsOpen }) => ({
@@ -4,6 +4,7 @@ import { defineMessages, injectIntl } from 'react-intl';
 import DataSaving from '/imports/ui/components/settings/submenus/data-saving/component';
 import Application from '/imports/ui/components/settings/submenus/application/component';
 import Notification from '/imports/ui/components/settings/submenus/notification/component';
+import Transcription from '/imports/ui/components/settings/submenus/transcription/component';
 import { clone } from 'radash';
 import PropTypes from 'prop-types';
 import Styled from './styles';
@@ -54,6 +55,10 @@
     id: 'app.settings.dataSavingTab.label',
     description: 'label for data savings tab',
   },
+  transcriptionLabel: {
+    id: 'app.settings.transcriptionTab.label',
+    description: 'label for transcriptions tab',
+  },
   savedAlertLabel: {
     id: 'app.settings.save-notification.label',
     description: 'label shown in toast when settings are saved',
@@ -76,6 +81,10 @@ const propTypes = {
     viewParticipantsWebcams: PropTypes.bool,
     viewScreenshare: PropTypes.bool,
   }).isRequired,
+  transcription: PropTypes.shape({
+    partialUtterances: PropTypes.bool,
+    minUtteraceLength: PropTypes.number,
+  }).isRequired,
   application: PropTypes.shape({
     chatAudioAlerts: PropTypes.bool,
     chatPushAlerts: PropTypes.bool,
@@ -95,6 +104,7 @@
   availableLocales: PropTypes.objectOf(PropTypes.array).isRequired,
   showToggleLabel: PropTypes.bool.isRequired,
   isReactionsEnabled: PropTypes.bool.isRequired,
+  isGladiaEnabled: PropTypes.bool.isRequired,
 };

 class Settings extends Component {
@@ -106,17 +116,19 @@ class Settings extends Component {
     super(props);

     const {
-      dataSaving, application, selectedTab,
+      dataSaving, application, transcription, selectedTab,
     } = props;

     this.state = {
       current: {
         dataSaving: clone(dataSaving),
         application: clone(application),
+        transcription: clone(transcription),
       },
       saved: {
         dataSaving: clone(dataSaving),
         application: clone(application),
+        transcription: clone(transcription),
       },
       selectedTab: Number.isFinite(selectedTab) && selectedTab >= 0 && selectedTab <= 2
         ? selectedTab
@@ -175,6 +187,7 @@
       isScreenSharingEnabled,
       isVideoEnabled,
       isReactionsEnabled,
+      isGladiaEnabled,
     } = this.props;

     const {
@@ -216,6 +229,17 @@
             </Styled.SettingsTabSelector>
           )
           : null}
+        {isGladiaEnabled
+          ? (
+            <Styled.SettingsTabSelector
+              aria-labelledby="transcriptionTab"
+              selectedClassName="is-selected"
+            >
+              <Styled.SettingsIcon iconName="closed_caption" />
+              <span id="transcriptionTab">{intl.formatMessage(intlMessages.transcriptionLabel)}</span>
+            </Styled.SettingsTabSelector>
+          )
+          : null}
         </Styled.SettingsTabList>
         <Styled.SettingsTabPanel selectedClassName="is-selected">
           <Application
@@ -254,6 +278,17 @@
           </Styled.SettingsTabPanel>
         )
           : null}
+        {isGladiaEnabled
+          ? (
+            <Styled.SettingsTabPanel selectedClassName="is-selected">
+              <Transcription
+                handleUpdateSettings={this.handleUpdateSettings}
+                settings={current.transcription}
+                displaySettingsStatus={this.displaySettingsStatus}
+              />
+            </Styled.SettingsTabPanel>
+          )
+          : null}
       </Styled.SettingsTabs>
     );
   }
@@ -5,6 +5,7 @@ import Settings from './component';
 import { layoutDispatch } from '../layout/context';
 import { isScreenSharingEnabled } from '/imports/ui/services/features';
 import UserReactionService from '/imports/ui/components/user-reaction/service';
+import SpeechService from '/imports/ui/components/audio/captions/speech/service';

 import {
   getUserRoles,
@@ -25,6 +26,7 @@ export default withTracker((props) => ({
   audio: SettingsService.audio,
   dataSaving: SettingsService.dataSaving,
   application: SettingsService.application,
+  transcription: SettingsService.transcription,
   updateSettings,
   availableLocales: getAvailableLocales(),
   isPresenter: isPresenter(),
@@ -34,4 +36,5 @@
   isScreenSharingEnabled: isScreenSharingEnabled(),
   isVideoEnabled: Meteor.settings.public.kurento.enableVideo,
   isReactionsEnabled: UserReactionService.isEnabled(),
+  isGladiaEnabled: SpeechService.isActive() && SpeechService.isGladia(),
 }))(SettingsContainer);
@@ -3,6 +3,7 @@ import Auth from '/imports/ui/services/auth';
 import Settings from '/imports/ui/services/settings';
 import {notify} from '/imports/ui/services/notification';
 import GuestService from '/imports/ui/components/waiting-users/service';
+import SpeechService from '/imports/ui/components/audio/captions/speech/service';
 import Intl from '/imports/ui/services/locale';

 const getUserRoles = () => {
@@ -35,6 +36,11 @@ const updateSettings = (obj, msgDescriptor) => {
   Object.keys(obj).forEach(k => (Settings[k] = obj[k]));
   Settings.save();

+  if (obj.transcription) {
+    const { partialUtterances, minUtteranceLength } = obj.transcription;
+    SpeechService.setSpeechOptions(partialUtterances, parseInt(minUtteranceLength, 10));
+  }
+
   if (msgDescriptor) {
     // prevents React state update on unmounted component
     setTimeout(() => {
@@ -15,4 +15,13 @@ export default class BaseMenu extends React.Component {
       this.handleUpdateSettings(this.state.settingsName, this.state.settings);
     });
   }
+
+  handleInput(key, e) {
+    const obj = this.state;
+    obj.settings[key] = e.target.value;
+
+    this.setState(obj, () => {
+      this.handleUpdateSettings(this.state.settingsName, this.state.settings);
+    });
+  }
 }
@@ -0,0 +1,97 @@
+import React from 'react';
+import Toggle from '/imports/ui/components/common/switch/component';
+import { defineMessages, injectIntl } from 'react-intl';
+import BaseMenu from '../base/component';
+import Styled from './styles';
+
+const intlMessages = defineMessages({
+  transcriptionLabel: {
+    id: 'app.submenu.transcription.sectionTitle',
+  },
+  transcriptionDesc: {
+    id: 'app.submenu.transcription.desc',
+  },
+  partialUtterancesLabel: {
+    id: 'app.settings.transcriptionTab.partialUtterances',
+  },
+  minUtteranceLengthLabel: {
+    id: 'app.settings.transcriptionTab.minUtteranceLength',
+  },
+});
+
+class Transcription extends BaseMenu {
+  constructor(props) {
+    super(props);
+
+    this.state = {
+      settingsName: 'transcription',
+      settings: props.settings,
+    };
+  }
+
+  render() {
+    const {
+      intl,
+      showToggleLabel,
+      displaySettingsStatus,
+    } = this.props;
+
+    const { partialUtterances, minUtteranceLength } = this.state.settings;
+
+    return (
+      <div>
+        <div>
+          <Styled.Title>{intl.formatMessage(intlMessages.transcriptionLabel)}</Styled.Title>
+          <Styled.SubTitle>{intl.formatMessage(intlMessages.transcriptionDesc)}</Styled.SubTitle>
+        </div>
+
+        <Styled.Form>
+          <Styled.Row>
+            <Styled.Col aria-hidden>
+              <Styled.FormElement>
+                <Styled.Label>
+                  {intl.formatMessage(intlMessages.partialUtterancesLabel)}
+                </Styled.Label>
+              </Styled.FormElement>
+            </Styled.Col>
+            <Styled.Col>
+              <Styled.FormElementRight>
+                <Toggle
+                  icons={false}
+                  defaultChecked={partialUtterances}
+                  onChange={() => this.handleToggle('partialUtterances')}
+                  ariaLabelledBy="partialUtterances"
+                  ariaLabel={`${intl.formatMessage(intlMessages.partialUtterancesLabel)} - ${displaySettingsStatus(partialUtterances, true)}`}
+                  showToggleLabel={showToggleLabel}
+                />
+              </Styled.FormElementRight>
+            </Styled.Col>
+          </Styled.Row>
+          <Styled.Row>
+            <Styled.Col aria-hidden>
+              <Styled.FormElement>
+                <Styled.Label>
+                  {intl.formatMessage(intlMessages.minUtteranceLengthLabel)}
+                </Styled.Label>
+              </Styled.FormElement>
+            </Styled.Col>
+            <Styled.Col>
+              <Styled.FormElementRight>
+                <input
+                  value={minUtteranceLength}
+                  onChange={ (e) => this.handleInput('minUtteranceLength', e) }
+                  type="number"
+                  max="5"
+                  min="0"
+                >
+                </input>
+              </Styled.FormElementRight>
+            </Styled.Col>
+          </Styled.Row>
+        </Styled.Form>
+      </div>
+    );
+  }
+}
+
+export default injectIntl(Transcription);
@@ -0,0 +1,29 @@
+import styled from 'styled-components';
+import Styled from '/imports/ui/components/settings/submenus/styles';
+
+const Title = styled(Styled.Title)``;
+
+const SubTitle = styled(Styled.SubTitle)``;
+
+const Form = styled(Styled.Form)``;
+
+const Row = styled(Styled.Row)``;
+
+const Col = styled(Styled.Col)``;
+
+const FormElement = styled(Styled.FormElement)``;
+
+const FormElementRight = styled(Styled.FormElementRight)``;
+
+const Label = styled(Styled.Label)``;
+
+export default {
+  Title,
+  SubTitle,
+  Form,
+  Row,
+  Col,
+  FormElement,
+  FormElementRight,
+  Label,
+};
@@ -20,6 +20,10 @@ const intlMessages = defineMessages({
     id: 'app.captions.label',
     description: 'used for captions button aria label',
   },
+  captionTitle: {
+    id: 'app.captions.title',
+    description: 'title for the transcription pad button on the sidebar',
+  },
 });

 const CaptionsListItem = (props) => {
@@ -70,7 +74,7 @@ const CaptionsListItem = (props) => {
       onKeyPress={() => {}}
     >
       <Icon iconName="closed_caption" />
-      <span aria-hidden>{name}</span>
+      <span aria-hidden>{intl.formatMessage(intlMessages.captionTitle)}</span>
     </Styled.ListItem>
   );
 };
@@ -36,7 +36,7 @@ class UserContent extends PureComponent {
     return (
       <Styled.Content data-test="userListContent">
         {isChatEnabled() ? <UserMessagesContainer /> : null}
-        {currentUser.role === ROLE_MODERATOR ? <UserCaptionsContainer /> : null}
+        <UserCaptionsContainer />
         <UserNotesContainer />
         { isTimerActive && (
           <TimerContainer
@@ -14,6 +14,7 @@ const SETTINGS = [
   'dataSaving',
   'animations',
   'selfViewDisable',
+  'transcription',
 ];

 const CHANGED_SETTINGS = 'changed_settings';
@@ -89,9 +89,10 @@ public:
     # https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API/Using_the_Web_Speech_API#speech_recognition
     audioCaptions:
       enabled: false
+      alwaysVisible: false
       # mobile: <Boolean> - controls speech transcription availability on mobile
       mobile: false
-      # provider: [webspeech, vosk, whisper, deepspeech]
+      # provider: [webspeech, vosk, gladia]
       provider: webspeech
       language:
         available:
@@ -238,6 +239,12 @@ public:
     dataSaving:
       viewParticipantsWebcams: true
      viewScreenshare: true
+    # Options that are sent to the transcription backed (only Gladia is supported for now)
+    transcription:
+      # Indicates if the transcription backend should include partial results
+      partialUtterances: true
+      # The minumum length (in seconds) an utterance has to have for we to use it
+      minUtteranceLength: 3
   shortcuts:
     openOptions:
       accesskey: O
@@ -543,14 +550,23 @@ public:
     chatMessage: true
   captions:
     enabled: true
+    showButton: false
     id: captions
     dictation: false
+    # Default pad which will store automatically generated captions
+    defaultPad: en
     background: '#000000'
     font:
       color: '#ffffff'
       family: Calibri
       size: 24px
+    # maximum number of simultaneous captions on screen
+    captionLimit: 3
+    # maximum size of a caption line in characters
+    lineLimit: 60
+    # maximum number of lines a caption can have on screen
     lines: 2
+    # time the captions will hang on screen after last updated
     time: 5000
   timer:
     enabled: true
@@ -82,6 +82,7 @@
     "app.timer.track2": "Calm",
     "app.timer.track3": "Happy",
     "app.captions.label": "Captions",
+    "app.captions.title": "Transcription",
     "app.captions.menu.close": "Close",
     "app.captions.menu.start": "Start",
     "app.captions.menu.ariaStart": "Start writing captions",
@@ -598,10 +599,13 @@
     "app.submenu.video.videoQualityLabel": "Video quality",
     "app.submenu.video.qualityOptionLabel": "Choose the video quality",
     "app.submenu.video.participantsCamLabel": "Viewing participants webcams",
+    "app.submenu.transcription.desc": "Adjust transcription backend parameters",
+    "app.submenu.transcription.sectionTitle": "Transcription options",
     "app.settings.applicationTab.label": "Application",
     "app.settings.audioTab.label": "Audio",
     "app.settings.videoTab.label": "Video",
     "app.settings.usersTab.label": "Participants",
+    "app.settings.transcriptionTab.label": "Transcription",
     "app.settings.main.label": "Settings",
     "app.settings.main.cancel.label": "Cancel",
     "app.settings.main.cancel.label.description": "Discards the changes and closes the settings menu",
@@ -611,6 +615,9 @@
     "app.settings.dataSavingTab.webcam": "Enable other participants webcams",
     "app.settings.dataSavingTab.screenShare": "Enable other participants desktop sharing",
     "app.settings.dataSavingTab.description": "To save your bandwidth adjust what's currently being displayed.",
+    "app.settings.transcriptionTab.label": "Transcription",
+    "app.settings.transcriptionTab.partialUtterances": "Show partial utterances",
+    "app.settings.transcriptionTab.minUtteranceLength": "Minimum utterances length (seconds)",
     "app.settings.save-notification.label": "Settings have been saved",
     "app.statusNotifier.lowerHands": "Lower Hands",
     "app.statusNotifier.lowerHandDescOneUser": "Lower {0}'s hand",
@@ -777,8 +784,10 @@
     "app.audio.captions.button.language": "Language",
     "app.audio.captions.button.transcription": "Transcription",
     "app.audio.captions.button.transcriptionSettings": "Transcription settings",
+    "app.audio.captions.button.autoDetect": "Auto Detect",
     "app.audio.captions.speech.title": "Automatic transcription",
     "app.audio.captions.speech.disabled": "Disabled",
+    "app.audio.captions.speech.auto": "Auto Detect",
     "app.audio.captions.speech.unsupported": "Your browser doesn't support speech recognition. Your audio won't be transcribed",
     "app.audio.captions.select.de-DE": "German",
     "app.audio.captions.select.en-US": "English",
@@ -82,6 +82,7 @@
     "app.timer.track2": "Calme",
     "app.timer.track3": "Heureux",
     "app.captions.label": "Sous-titres",
+    "app.captions.title": "Transcription",
     "app.captions.menu.close": "Fermer",
     "app.captions.menu.start": "Démarrer",
     "app.captions.menu.ariaStart": "Démarrer le sous-titrage",
@@ -611,6 +612,9 @@
     "app.settings.dataSavingTab.webcam": "Activer les webcams des autres participants",
     "app.settings.dataSavingTab.screenShare": "Activer le partage d'écran des autres participants",
     "app.settings.dataSavingTab.description": "Pour économiser votre bande passante, ajustez l'affichage actuel.",
+    "app.settings.transcriptionTab.label": "Transcription",
+    "app.settings.transcriptionTab.partialUtterances": "Afficher les énoncés partiels",
+    "app.settings.transcriptionTab.minUtteranceLength": "Durée minimale de l'énoncé (secondes)",
     "app.settings.save-notification.label": "Les paramètres ont été enregistrés",
     "app.statusNotifier.lowerHands": "Mains baissées",
     "app.statusNotifier.lowerHandDescOneUser": "Abaisser la main de {0}",
@@ -772,6 +776,7 @@
     "app.audio.listenOnly.closeLabel": "Fermer",
     "app.audio.permissionsOverlay.title": "Autoriser BigBlueButton à utiliser votre micro",
     "app.audio.permissionsOverlay.hint": "Il est nécessaire que vous nous autorisiez à utiliser vos appareils multimédias pour que vous puissiez participer à la réunion",
+    "app.audio.captions.button.autoDetect": "Détection Automatique",
     "app.audio.captions.button.start": "Initier un sous titrage SME",
     "app.audio.captions.button.stop": "Arrêter le sous titrage SME",
     "app.audio.captions.button.language": "Langue",
@@ -780,6 +785,7 @@
     "app.audio.captions.speech.title": "Transcription automatique",
     "app.audio.captions.speech.disabled": "Desactivé",
     "app.audio.captions.speech.unsupported": "Votre navigateur n'est pas compatible avec la reconnaissance vocale. L'entrée audio ne sera pas retranscrite.",
+    "app.audio.captions.speech.auto": "Détection Automatique",
     "app.audio.captions.select.de-DE": "Allemand",
     "app.audio.captions.select.en-US": "Anglais",
     "app.audio.captions.select.es-ES": "Espagnol",
@@ -82,6 +82,7 @@
     "app.timer.track2": "Calmo(a)",
     "app.timer.track3": "Feliz",
     "app.captions.label": "Legendas",
+    "app.captions.title": "Transcrição",
     "app.captions.menu.close": "Fechar",
     "app.captions.menu.start": "Iniciar",
     "app.captions.menu.ariaStart": "Comece a escrever legendas",
@@ -610,6 +611,9 @@
     "app.settings.dataSavingTab.webcam": "Ativar webcams",
     "app.settings.dataSavingTab.screenShare": "Ativar o compartilhamento de tela",
     "app.settings.dataSavingTab.description": "Para economizar o volume de transferência de dados, ajuste o que está sendo exibido no momento.",
+    "app.settings.transcriptionTab.label": "Transcrição",
+    "app.settings.transcriptionTab.partialUtterances": "Mostrar frases parciais",
+    "app.settings.transcriptionTab.minUtteranceLength": "Tempo mínimo para frases parciais (segundos)",
     "app.settings.save-notification.label": "As configurações foram salvas",
     "app.statusNotifier.lowerHands": "Mãos baixadas",
     "app.statusNotifier.lowerHandDescOneUser": "Abaixar a mão de {0}",
@@ -779,6 +783,7 @@
     "app.audio.captions.speech.title": "Transcrição automática",
     "app.audio.captions.speech.disabled": "Desabilitado",
     "app.audio.captions.speech.unsupported": "Seu navegador não suporta reconhecimento de fala. Seu áudio não será transcrito",
+    "app.audio.captions.speech.auto": "Auto Detectar",
     "app.audio.captions.select.de-DE": "Alemão",
     "app.audio.captions.select.en-US": "Inglês",
     "app.audio.captions.select.es-ES": "Espanhol",
@@ -5,6 +5,9 @@ case "$1" in
     TARGET=/usr/local/bigbluebutton/bbb-transcription-controller/config/default.yml
     cp /usr/local/bigbluebutton/bbb-transcription-controller/config/default.example.yml $TARGET

+    touch /var/log/bigbluebutton/gladia-proxy.log
+    chown bigbluebutton:bigbluebutton /var/log/bigbluebutton/gladia-proxy.log
+
     startService bbb-transcription-controller|| echo "bbb-transcription-controller could not be registered or started"
     ;;
