Merge pull request #18822 from Tainan404/audio-migration
Refactor: Audio Captions migration
This commit is contained in:
commit be8f4572da
@@ -29,3 +29,4 @@ select_permissions:
       filter:
         meetingId:
           _eq: X-Hasura-MeetingId
+      allow_aggregations: true
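
For context: this Hasura select permission row-scopes caption reads to the caller's meeting through the X-Hasura-MeetingId session variable, and the added allow_aggregations: true is what authorizes the caption_aggregate count subscription introduced later in this commit. A minimal sketch of a client document this permission admits (assuming the table exposed here is caption, as the queries below suggest):

import { gql } from '@apollo/client';

// Hasura injects the meetingId filter server-side from the session
// variable, so the client never passes (or can override) it.
const CAPTION_COUNT = gql`
  subscription CaptionCount {
    caption_aggregate {
      aggregate {
        count
      }
    }
  }
`;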
@@ -84,6 +84,15 @@ export interface ExternalVideo {
   updatedAt: Date;
 }

+export interface ComponentsFlags {
+  hasCaption: boolean;
+  hasBreakoutRoom: boolean;
+  hasExternalVideo: boolean;
+  hasPoll: boolean;
+  hasScreenshare: boolean;
+  hasTimer: boolean;
+}
+
 export interface Meeting {
   createdTime: number;
   disabledFeatures: Array<string>;
@@ -104,4 +113,5 @@ export interface Meeting {
   voiceSettings: VoiceSettings;
   breakoutPolicies: BreakoutPolicies;
   externalVideo: ExternalVideo;
+  componentsFlags: ComponentsFlags;
 }
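
componentsFlags is what lets the GraphQL-driven components decide whether to mount at all; the caption button container later in this diff does exactly that. A condensed sketch of the gating pattern, using the useMeeting hook the same way this commit does:

import React from 'react';
import useMeeting from '/imports/ui/core/hooks/useMeeting';

const CaptionGatedComponent: React.FC = () => {
  const { data: meeting } = useMeeting((m) => ({
    componentsFlags: m.componentsFlags,
  }));

  // Render nothing until the subscription delivers the flags,
  // or when the meeting has no caption activity.
  if (!meeting?.componentsFlags?.hasCaption) return null;

  return <span>caption UI here</span>;
};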
@@ -0,0 +1,296 @@
import React, { useEffect, useRef } from 'react';
import { layoutSelect } from '/imports/ui/components/layout/context';
import { Layout } from '/imports/ui/components/layout/layoutTypes';
import useCurrentUser from '/imports/ui/core/hooks/useCurrentUser';
import ButtonEmoji from '/imports/ui/components/common/button/button-emoji/ButtonEmoji';
import BBBMenu from '/imports/ui/components/common/menu/component';
import Styled from './styles';
import { getSpeechVoices, setAudioCaptions, setSpeechLocale } from '../service';
import { defineMessages, useIntl } from 'react-intl';
import { MenuSeparatorItemType, MenuOptionItemType } from '/imports/ui/components/common/menu/menuTypes';
import useAudioCaptionEnable from '/imports/ui/core/local-states/useAudioCaptionEnable';
import { User } from '/imports/ui/Types/user';
import useMeeting from '/imports/ui/core/hooks/useMeeting';
import { useMutation } from '@apollo/client';
import { SET_SPEECH_LOCALE } from '/imports/ui/core/graphql/mutations/userMutations';

const intlMessages = defineMessages({
  start: {
    id: 'app.audio.captions.button.start',
    description: 'Start audio captions',
  },
  stop: {
    id: 'app.audio.captions.button.stop',
    description: 'Stop audio captions',
  },
  transcriptionSettings: {
    id: 'app.audio.captions.button.transcriptionSettings',
    description: 'Audio captions settings modal',
  },
  transcription: {
    id: 'app.audio.captions.button.transcription',
    description: 'Audio speech transcription label',
  },
  transcriptionOn: {
    id: 'app.switch.onLabel',
  },
  transcriptionOff: {
    id: 'app.switch.offLabel',
  },
  language: {
    id: 'app.audio.captions.button.language',
    description: 'Audio speech recognition language label',
  },
  'de-DE': {
    id: 'app.audio.captions.select.de-DE',
    description: 'Audio speech recognition german language',
  },
  'en-US': {
    id: 'app.audio.captions.select.en-US',
    description: 'Audio speech recognition english language',
  },
  'es-ES': {
    id: 'app.audio.captions.select.es-ES',
    description: 'Audio speech recognition spanish language',
  },
  'fr-FR': {
    id: 'app.audio.captions.select.fr-FR',
    description: 'Audio speech recognition french language',
  },
  'hi-ID': {
    id: 'app.audio.captions.select.hi-ID',
    description: 'Audio speech recognition indian language',
  },
  'it-IT': {
    id: 'app.audio.captions.select.it-IT',
    description: 'Audio speech recognition italian language',
  },
  'ja-JP': {
    id: 'app.audio.captions.select.ja-JP',
    description: 'Audio speech recognition japanese language',
  },
  'pt-BR': {
    id: 'app.audio.captions.select.pt-BR',
    description: 'Audio speech recognition portuguese language',
  },
  'ru-RU': {
    id: 'app.audio.captions.select.ru-RU',
    description: 'Audio speech recognition russian language',
  },
  'zh-CN': {
    id: 'app.audio.captions.select.zh-CN',
    description: 'Audio speech recognition chinese language',
  },
});

interface AudioCaptionsButtonProps {
  isRTL: boolean;
  availableVoices: string[];
  currentSpeechLocale: string;
  isSupported: boolean;
  isVoiceUser: boolean;
}

const DISABLED = '';

const AudioCaptionsButton: React.FC<AudioCaptionsButtonProps> = ({
  isRTL,
  currentSpeechLocale,
  availableVoices,
  isSupported,
  isVoiceUser,
}) => {
  const intl = useIntl();
  const [active] = useAudioCaptionEnable();
  const [setSpeechLocaleMutation] = useMutation(SET_SPEECH_LOCALE);
  const setUserSpeechLocale = (speechLocale: string, provider: string) => {
    setSpeechLocaleMutation({
      variables: {
        locale: speechLocale,
        provider,
      },
    });
  };
  const isTranscriptionDisabled = () => currentSpeechLocale === DISABLED;
  const fallbackLocale = availableVoices.includes(navigator.language)
    ? navigator.language
    : 'en-US'; // Assuming 'en-US' is the default fallback locale

  const getSelectedLocaleValue = isTranscriptionDisabled()
    ? fallbackLocale
    : currentSpeechLocale;

  const selectedLocale = useRef(getSelectedLocaleValue);

  useEffect(() => {
    if (!isTranscriptionDisabled()) selectedLocale.current = getSelectedLocaleValue;
  }, [currentSpeechLocale]);

  const shouldRenderChevron = isSupported && isVoiceUser;

  const toggleTranscription = () => {
    setSpeechLocale(isTranscriptionDisabled() ? selectedLocale.current : DISABLED, setUserSpeechLocale);
  };

  const getAvailableLocales = () => {
    let indexToInsertSeparator = -1;
    const availableVoicesObjectToMenu: (MenuOptionItemType | MenuSeparatorItemType)[] = availableVoices
      .map((availableVoice: string, index: number) => {
        if (availableVoice === availableVoices[0]) {
          indexToInsertSeparator = index;
        }
        return (
          {
            icon: '',
            label: intl.formatMessage(intlMessages[availableVoice as keyof typeof intlMessages]),
            key: availableVoice,
            iconRight: selectedLocale.current === availableVoice ? 'check' : null,
            customStyles: (selectedLocale.current === availableVoice) && Styled.SelectedLabel,
            disabled: isTranscriptionDisabled(),
            onClick: () => {
              selectedLocale.current = availableVoice;
              setSpeechLocale(selectedLocale.current, setUserSpeechLocale);
            },
          }
        );
      });
    if (indexToInsertSeparator >= 0) {
      availableVoicesObjectToMenu.splice(indexToInsertSeparator, 0, {
        key: 'separator-01',
        isSeparator: true,
      });
    }
    return [
      ...availableVoicesObjectToMenu,
    ];
  };

  const getAvailableLocalesList = () => (
    [{
      key: 'availableLocalesList',
      label: intl.formatMessage(intlMessages.language),
      customStyles: Styled.TitleLabel,
      disabled: true,
    },
    ...getAvailableLocales(),
    {
      key: 'divider',
      label: intl.formatMessage(intlMessages.transcription),
      customStyles: Styled.TitleLabel,
      disabled: true,
    },
    {
      key: 'separator-02',
      isSeparator: true,
    },
    {
      key: 'transcriptionStatus',
      label: intl.formatMessage(
        isTranscriptionDisabled()
          ? intlMessages.transcriptionOn
          : intlMessages.transcriptionOff,
      ),
      customStyles: isTranscriptionDisabled()
        ? Styled.EnableTrascription : Styled.DisableTrascription,
      disabled: false,
      onClick: toggleTranscription,
    }]
  );
  const onToggleClick = (e: React.MouseEvent) => {
    e.stopPropagation();
    setAudioCaptions(!active);
  };

  const startStopCaptionsButton = (
    <Styled.ClosedCaptionToggleButton
      icon={active ? 'closed_caption' : 'closed_caption_stop'}
      label={intl.formatMessage(active ? intlMessages.stop : intlMessages.start)}
      color={active ? 'primary' : 'default'}
      ghost={!active}
      hideLabel
      circle
      size="lg"
      onClick={onToggleClick}
    />
  );

  return (
    shouldRenderChevron
      ? (
        <Styled.SpanButtonWrapper>
          <BBBMenu
            trigger={(
              <>
                { startStopCaptionsButton }
                <ButtonEmoji
                  emoji="device_list_selector"
                  hideLabel
                  label={intl.formatMessage(intlMessages.transcriptionSettings)}
                  tabIndex={0}
                  rotate
                />
              </>
            )}
            actions={getAvailableLocalesList()}
            opts={{
              id: 'default-dropdown-menu',
              keepMounted: true,
              transitionDuration: 0,
              elevation: 3,
              getcontentanchorel: null,
              fullwidth: 'true',
              anchorOrigin: { vertical: 'top', horizontal: isRTL ? 'right' : 'left' },
              transformOrigin: { vertical: 'bottom', horizontal: isRTL ? 'right' : 'left' },
            }}
          />
        </Styled.SpanButtonWrapper>
      ) : startStopCaptionsButton
  );
};

const AudioCaptionsButtonContainer: React.FC = () => {
  const isRTL = layoutSelect((i: Layout) => i.isRTL);
  const {
    data: currentUser,
    loading: currentUserLoading,
  } = useCurrentUser(
    (user: Partial<User>) => ({
      speechLocale: user.speechLocale,
      voice: user.voice,
    }),
  );

  const {
    data: currentMeeting,
    loading: componentsFlagsLoading,
  } = useMeeting((m) => {
    return {
      componentsFlags: m.componentsFlags,
    };
  });
  if (currentUserLoading || componentsFlagsLoading) return null;
  if (!currentUser || !currentMeeting) return null;

  const availableVoices = getSpeechVoices();
  const currentSpeechLocale = currentUser.speechLocale || '';
  const isSupported = availableVoices.length > 0;
  const isVoiceUser = !!currentUser.voice;

  const { componentsFlags } = currentMeeting;

  const hasCaptions = componentsFlags?.hasCaption;

  if (!hasCaptions) return null;

  return (
    <AudioCaptionsButton
      isRTL={isRTL}
      availableVoices={availableVoices}
      currentSpeechLocale={currentSpeechLocale}
      isSupported={isSupported}
      isVoiceUser={isVoiceUser}
    />
  );
};

export default AudioCaptionsButtonContainer;
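
Note that this button drives two independent switches: setAudioCaptions(!active) only flips the local caption overlay, while toggleTranscription persists the user's speechLocale through GraphQL, with the empty string acting as the off sentinel. The toggle reduces to a small pure function (a restatement of toggleTranscription above):

const DISABLED = '';

// Given the persisted locale and the last locale remembered in
// selectedLocale.current, compute the next value to persist.
const nextSpeechLocale = (current: string, remembered: string): string => (
  current === DISABLED ? remembered : DISABLED
);

// nextSpeechLocale('', 'pt-BR')      -> 'pt-BR' (turn transcription on)
// nextSpeechLocale('pt-BR', 'pt-BR') -> ''      (turn it off)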
@@ -0,0 +1,23 @@
import { gql } from '@apollo/client';

export interface GetAudioCaptionsCountResponse {
  caption_aggregate: {
    aggregate: {
      count: number;
    }
  };
}

export const GET_AUDIO_CAPTIONS_COUNT = gql`
  subscription GetAudioCaptionsCount {
    caption_aggregate {
      aggregate {
        count(columns: captionId)
      }
    }
  }
`;

export default {
  GET_AUDIO_CAPTIONS_COUNT,
};
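
A minimal sketch of consuming this subscription with Apollo's useSubscription and the response interface above (the component name is illustrative):

import React from 'react';
import { useSubscription } from '@apollo/client';
import { GET_AUDIO_CAPTIONS_COUNT, GetAudioCaptionsCountResponse } from './queries';

const CaptionsCount: React.FC = () => {
  const { data, loading } = useSubscription<GetAudioCaptionsCountResponse>(
    GET_AUDIO_CAPTIONS_COUNT,
  );

  if (loading || !data) return null;
  return <span>{data.caption_aggregate.aggregate.count}</span>;
};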
@@ -0,0 +1,62 @@
import styled from 'styled-components';
import Button from '/imports/ui/components/common/button/component';
import Toggle from '/imports/ui/components/common/switch/component';
import {
  colorWhite,
  colorPrimary,
  colorOffWhite,
  colorDangerDark,
  colorSuccess,
} from '/imports/ui/stylesheets/styled-components/palette';

// @ts-ignore - as button comes from JS, we can't provide its props
const ClosedCaptionToggleButton = styled(Button)`
  ${({ ghost }) => ghost && `
    span {
      box-shadow: none;
      background-color: transparent !important;
      border-color: ${colorWhite} !important;
    }
    i {
      margin-top: .4rem;
    }
  `}
`;

const SpanButtonWrapper = styled.span`
  position: relative;
`;

const TranscriptionToggle = styled(Toggle)`
  display: flex;
  justify-content: flex-start;
  padding-left: 1em;
`;

const TitleLabel = {
  fontWeight: 'bold',
  opacity: 1,
};

const EnableTrascription = {
  color: colorSuccess,
};

const DisableTrascription = {
  color: colorDangerDark,
};

const SelectedLabel = {
  color: colorPrimary,
  backgroundColor: colorOffWhite,
};

export default {
  ClosedCaptionToggleButton,
  SpanButtonWrapper,
  TranscriptionToggle,
  TitleLabel,
  EnableTrascription,
  DisableTrascription,
  SelectedLabel,
};
@@ -0,0 +1,169 @@
import React from 'react';
import { defineMessages, useIntl } from 'react-intl';
import { useMutation } from '@apollo/client';

import {
  getSpeechVoices,
  isAudioTranscriptionEnabled,
  setSpeechLocale,
  useFixedLocale,
} from '../service';
import useCurrentUser from '/imports/ui/core/hooks/useCurrentUser';
import { SET_SPEECH_LOCALE } from '/imports/ui/core/graphql/mutations/userMutations';

const intlMessages = defineMessages({
  title: {
    id: 'app.audio.captions.speech.title',
    description: 'Audio speech recognition title',
  },
  disabled: {
    id: 'app.audio.captions.speech.disabled',
    description: 'Audio speech recognition disabled',
  },
  unsupported: {
    id: 'app.audio.captions.speech.unsupported',
    description: 'Audio speech recognition unsupported',
  },
  'de-DE': {
    id: 'app.audio.captions.select.de-DE',
    description: 'Audio speech recognition german language',
  },
  'en-US': {
    id: 'app.audio.captions.select.en-US',
    description: 'Audio speech recognition english language',
  },
  'es-ES': {
    id: 'app.audio.captions.select.es-ES',
    description: 'Audio speech recognition spanish language',
  },
  'fr-FR': {
    id: 'app.audio.captions.select.fr-FR',
    description: 'Audio speech recognition french language',
  },
  'hi-ID': {
    id: 'app.audio.captions.select.hi-ID',
    description: 'Audio speech recognition indian language',
  },
  'it-IT': {
    id: 'app.audio.captions.select.it-IT',
    description: 'Audio speech recognition italian language',
  },
  'ja-JP': {
    id: 'app.audio.captions.select.ja-JP',
    description: 'Audio speech recognition japanese language',
  },
  'pt-BR': {
    id: 'app.audio.captions.select.pt-BR',
    description: 'Audio speech recognition portuguese language',
  },
  'ru-RU': {
    id: 'app.audio.captions.select.ru-RU',
    description: 'Audio speech recognition russian language',
  },
  'zh-CN': {
    id: 'app.audio.captions.select.zh-CN',
    description: 'Audio speech recognition chinese language',
  },
});

interface AudioCaptionsSelectProps {
  isTranscriptionEnabled: boolean;
  speechLocale: string;
  speechVoices: string[];
}

const AudioCaptionsSelect: React.FC<AudioCaptionsSelectProps> = ({
  isTranscriptionEnabled,
  speechLocale,
  speechVoices,
}) => {
  const useLocaleHook = useFixedLocale();
  const intl = useIntl();
  const [setSpeechLocaleMutation] = useMutation(SET_SPEECH_LOCALE);

  const setUserSpeechLocale = (speechLocale: string, provider: string) => {
    setSpeechLocaleMutation({
      variables: {
        locale: speechLocale,
        provider,
      },
    });
  };

  if (!isTranscriptionEnabled || useLocaleHook) return null;

  if (speechVoices.length === 0) {
    return (
      <div
        data-test="speechRecognitionUnsupported"
        style={{
          fontSize: '.75rem',
          padding: '1rem 0',
        }}
      >
        {`*${intl.formatMessage(intlMessages.unsupported)}`}
      </div>
    );
  }

  const onChange = (e: React.ChangeEvent<HTMLSelectElement>) => {
    const { value } = e.target;
    setSpeechLocale(value, setUserSpeechLocale);
  };

  return (
    <div style={{ padding: '1rem 0' }}>
      <label
        htmlFor="speechSelect"
        style={{ padding: '0 .5rem' }}
      >
        {intl.formatMessage(intlMessages.title)}
      </label>
      <select
        id="speechSelect"
        onChange={onChange}
        value={speechLocale}
      >
        <option
          key="disabled"
          value=""
        >
          {intl.formatMessage(intlMessages.disabled)}
        </option>
        {speechVoices.map((v) => (
          <option
            key={v}
            value={v}
          >
            {intl.formatMessage(intlMessages[v as keyof typeof intlMessages])}
          </option>
        ))}
      </select>
    </div>
  );
};

const AudioCaptionsSelectContainer: React.FC = () => {
  const {
    data: currentUser,
  } = useCurrentUser(
    (user) => ({
      speechLocale: user.speechLocale,
      voice: user.voice,
    }),
  );
  const isEnabled = isAudioTranscriptionEnabled();
  const voices = getSpeechVoices();

  if (!currentUser || !isEnabled || !voices) return null;

  return (
    <AudioCaptionsSelect
      isTranscriptionEnabled={isEnabled}
      speechLocale={currentUser.speechLocale ?? ''}
      speechVoices={voices}
    />
  );
};

export default AudioCaptionsSelectContainer;
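
The empty-string <option> doubles as the off switch: selecting it routes '' through setSpeechLocale, which the service below accepts explicitly, and every consumer in this commit reads an empty speechLocale as transcription disabled. A one-line restatement of that contract:

// '' = transcription off; any listed voice (e.g. 'pt-BR') = transcription on.
const isTranscriptionOff = (speechLocale: string): boolean => speechLocale === '';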
@@ -0,0 +1,89 @@
import { useSubscription } from '@apollo/client';
import React from 'react';
import { Caption, GET_CAPTIONS, getCaptions } from './queries';
import logger from '/imports/startup/client/logger';

import Styled from './styles';
import useAudioCaptionEnable from '/imports/ui/core/local-states/useAudioCaptionEnable';

interface AudioCaptionsLiveProps {
  captions: Caption[];
}

const AudioCaptionsLive: React.FC<AudioCaptionsLiveProps> = ({
  captions,
}) => {
  return (
    <Styled.Wrapper>
      <>
        {
          captions.length > 0 ? captions.map((caption) => {
            const {
              user,
              captionText,
            } = caption;
            return (
              <Styled.CaptionWrapper>
                {!user ? null : (
                  <Styled.UserAvatarWrapper>
                    <Styled.UserAvatar
                      avatar={user.avatar}
                      color={user.color}
                      moderator={user.isModerator}
                    >
                      {user.name.slice(0, 2)}
                    </Styled.UserAvatar>
                  </Styled.UserAvatarWrapper>
                )}
                <Styled.Captions hasContent>
                  {!captionText ? '' : captionText}
                </Styled.Captions>
                <Styled.VisuallyHidden
                  aria-atomic
                  aria-live="polite"
                >
                  {!captionText ? '' : captionText}
                </Styled.VisuallyHidden>
              </Styled.CaptionWrapper>
            );
          }) : null
        }
      </>
    </Styled.Wrapper>
  );
};

const AudioCaptionsLiveContainer: React.FC = () => {
  const {
    data: AudioCaptionsLiveData,
    loading: AudioCaptionsLiveLoading,
    error: AudioCaptionsLiveError,
  } = useSubscription<getCaptions>(GET_CAPTIONS);

  const [audioCaptionsEnable] = useAudioCaptionEnable();

  if (AudioCaptionsLiveLoading) return null;

  if (AudioCaptionsLiveError) {
    logger.error(AudioCaptionsLiveError);
    return (
      <div>
        {JSON.stringify(AudioCaptionsLiveError)}
      </div>
    );
  }

  if (!AudioCaptionsLiveData) return null;
  if (!AudioCaptionsLiveData.caption) return null;
  if (!AudioCaptionsLiveData.caption[0]) return null;

  if (!audioCaptionsEnable) return null;

  return (
    <AudioCaptionsLive
      captions={AudioCaptionsLiveData.caption}
    />
  );
};

export default AudioCaptionsLiveContainer;
@@ -0,0 +1,46 @@
import { gql } from '@apollo/client';
import { User } from '/imports/ui/Types/user';

export interface Caption {
  user: Pick<User, 'avatar' | 'color' | 'isModerator' | 'name'>;
  captionText: string;
  captionId: string;
  createdAt: string;
}

export interface getCaptions {
  caption: Caption[];
}

export interface GetAudioCaptions {
  audio_caption: Array<{
    user: {
      avatar: string;
      color: string;
      isModerator: boolean;
      name: string;
    };
    transcript: string;
    transcriptId: string;
  }>;
}

export const GET_CAPTIONS = gql`
  subscription getCaptions {
    caption {
      user {
        avatar
        color
        isModerator
        name
      }
      captionText
      captionId
      createdAt
    }
  }
`;

export default {
  GET_CAPTIONS,
};
@@ -0,0 +1,147 @@
import styled from 'styled-components';

import {
  userIndicatorsOffset,
} from '/imports/ui/stylesheets/styled-components/general';
import {
  colorWhite,
  userListBg,
  colorSuccess,
} from '/imports/ui/stylesheets/styled-components/palette';

type CaptionsProps = {
  hasContent: boolean;
};

interface UserAvatarProps {
  color: string;
  moderator: boolean;
  avatar: string;
  emoji?: string;
}

const Wrapper = styled.div`
  display: flex;
  flex-direction: column;
`;

const CaptionWrapper = styled.div`
  display: flex;
  flex-direction: row;
  margin-bottom: 0.05rem;
`;

const Captions = styled.div<CaptionsProps>`
  white-space: pre-line;
  word-wrap: break-word;
  font-family: Verdana, Arial, Helvetica, sans-serif;
  font-size: 1.5rem;
  background: #000000a0;
  color: white;
  ${({ hasContent }) => hasContent && `
    padding: 0.5rem;
  `}
`;

const VisuallyHidden = styled.div`
  position: absolute;
  overflow: hidden;
  clip: rect(0 0 0 0);
  height: 1px;
  width: 1px;
  margin: -1px;
  padding: 0;
  border: 0;
`;

const UserAvatarWrapper = styled.div`
  background: #000000a0;
  min-height: 3.25rem;
  padding: 0.5rem;
  text-transform: capitalize;
  width: 3.25rem;
`;

const UserAvatar = styled.div<UserAvatarProps>`
  flex: 0 0 2.25rem;
  margin: 0px calc(0.5rem) 0px 0px;
  box-flex: 0;
  position: relative;
  height: 2.25rem;
  width: 2.25rem;
  border-radius: 50%;
  text-align: center;
  font-size: .85rem;
  border: 2px solid transparent;
  user-select: none;
  ${({ color }: UserAvatarProps) => `
    background-color: ${color};
  `}
  &:after,
  &:before {
    content: "";
    position: absolute;
    width: 0;
    height: 0;
    padding-top: .5rem;
    padding-right: 0;
    padding-left: 0;
    padding-bottom: 0;
    color: inherit;
    top: auto;
    left: auto;
    bottom: ${userIndicatorsOffset};
    right: ${userIndicatorsOffset};
    border: 1.5px solid ${userListBg};
    border-radius: 50%;
    background-color: ${colorSuccess};
    color: ${colorWhite};
    opacity: 0;
    font-family: 'bbb-icons';
    font-size: .65rem;
    line-height: 0;
    text-align: center;
    vertical-align: middle;
    letter-spacing: -.65rem;
    z-index: 1;
    [dir="rtl"] & {
      left: ${userIndicatorsOffset};
      right: auto;
      padding-right: .65rem;
      padding-left: 0;
    }
  }
  ${({ moderator }: UserAvatarProps) => moderator && `
    border-radius: 5px;
  `}
  // ================ image ================
  ${({ avatar, emoji }: UserAvatarProps) => avatar?.length !== 0 && !emoji && `
    background-image: url(${avatar});
    background-repeat: no-repeat;
    background-size: contain;
  `}
  // ================ image ================

  // ================ content ================
  color: ${colorWhite};
  font-size: 110%;
  text-transform: capitalize;
  display: flex;
  justify-content: center;
  align-items: center;
  // ================ content ================

  & .react-loading-skeleton {
    height: 2.25rem;
    width: 2.25rem;
  }
`;

export default {
  Wrapper,
  Captions,
  VisuallyHidden,
  UserAvatarWrapper,
  UserAvatar,
  CaptionWrapper,
};
@@ -0,0 +1,54 @@
import { Meteor } from 'meteor/meteor';
import { unique } from 'radash';
import logger from '/imports/startup/client/logger';
import { setAudioCaptionEnable } from '/imports/ui/core/local-states/useAudioCaptionEnable';
import { isLiveTranscriptionEnabled } from '/imports/ui/services/features';

const CONFIG = Meteor.settings.public.app.audioCaptions;
const PROVIDER = CONFIG.provider;
const LANGUAGES = CONFIG.language.available;

export const isAudioTranscriptionEnabled = () => isLiveTranscriptionEnabled();

export const isWebSpeechApi = () => PROVIDER === 'webspeech';

export const getSpeechVoices = () => {
  if (!isWebSpeechApi()) return LANGUAGES;

  return unique(
    window
      .speechSynthesis
      .getVoices()
      .map((v) => v.lang)
      .filter((v) => LANGUAGES.includes(v)),
  );
};

export const setAudioCaptions = (value: boolean) => {
  setAudioCaptionEnable(value);
  // @ts-ignore - Exists while we still have Meteor in the project
  Session.set('audioCaptions', value);
};

export const setSpeechLocale = (value: string, setUserSpeechLocale: (a: string, b: string) => void) => {
  const voices = getSpeechVoices();

  if (voices.includes(value) || value === '') {
    setUserSpeechLocale(value, CONFIG.provider);
  } else {
    logger.error({
      logCode: 'captions_speech_locale',
    }, 'Captions speech set locale error');
  }
};

export const useFixedLocale = () => isAudioTranscriptionEnabled() && CONFIG.language.forceLocale;

export default {
  getSpeechVoices,
  isAudioTranscriptionEnabled,
  setSpeechLocale,
  setAudioCaptions,
  isWebSpeechApi,
  useFixedLocale,
};
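
Because setSpeechLocale takes the persistence step as a callback, validation stays in one place while the writer can vary; the components above pass a SET_SPEECH_LOCALE mutation wrapper. A minimal sketch with a stand-in sink (the import path is inferred from the container imports later in this diff):

import { setSpeechLocale } from '/imports/ui/components/audio/audio-graphql/audio-captions/service';

// Any (locale, provider) sink satisfies the callback contract.
const persistLocale = (locale: string, provider: string) => {
  console.log(`would persist speechLocale=${locale} via provider=${provider}`);
};

setSpeechLocale('en-US', persistLocale); // persisted, if 'en-US' is an available voice
setSpeechLocale('xx-XX', persistLocale); // rejected: logs captions_speech_locale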
@@ -0,0 +1,266 @@
import React, {
  useCallback,
  useEffect,
  useMemo,
  useRef,
} from 'react';
// @ts-ignore - it has no types
import { diff } from '@mconf/bbb-diff';
import { useReactiveVar, useMutation } from '@apollo/client';
import { throttle } from 'radash';
import {
  SpeechRecognitionAPI,
  generateId,
  getLocale,
  hasSpeechRecognitionSupport,
  isLocaleValid,
  localeAsDefaultSelected,
  setSpeechVoices,
  updateFinalTranscript,
  useFixedLocale,
} from './service';
import logger from '/imports/startup/client/logger';
import AudioManager from '/imports/ui/services/audio-manager';
import useCurrentUser from '/imports/ui/core/hooks/useCurrentUser';
import { isAudioTranscriptionEnabled, isWebSpeechApi, setSpeechLocale } from '../service';
import { SET_SPEECH_LOCALE } from '/imports/ui/core/graphql/mutations/userMutations';
import { SUBMIT_TEXT } from './mutations';

const THROTTLE_TIMEOUT = 200;

type SpeechRecognitionEvent = {
  resultIndex: number;
  results: SpeechRecognitionResult[];
}

type SpeechRecognitionErrorEvent = {
  error: string;
  message: string;
}

interface AudioCaptionsSpeechProps {
  locale: string;
  connected: boolean;
}
const speechHasStarted = {
  started: false,
};
const AudioCaptionsSpeech: React.FC<AudioCaptionsSpeechProps> = ({
  locale,
  connected,
}) => {
  const resultRef = useRef({
    id: generateId(),
    transcript: '',
    isFinal: true,
  });

  const idleRef = useRef(true);
  const speechRecognitionRef = useRef<ReturnType<typeof SpeechRecognitionAPI>>(null);
  const prevIdRef = useRef('');
  const prevTranscriptRef = useRef('');
  const [setSpeechLocaleMutation] = useMutation(SET_SPEECH_LOCALE);

  const setUserSpeechLocale = (speechLocale: string, provider: string) => {
    setSpeechLocaleMutation({
      variables: {
        locale: speechLocale,
        provider,
      },
    });
  };

  const initSpeechRecognition = () => {
    if (!isAudioTranscriptionEnabled() && !isWebSpeechApi()) return null;

    if (!hasSpeechRecognitionSupport()) return null;

    setSpeechVoices();
    const speechRecognition = new SpeechRecognitionAPI();

    speechRecognition.continuous = true;
    speechRecognition.interimResults = true;

    if (useFixedLocale() || localeAsDefaultSelected()) {
      setSpeechLocale(getLocale(), setUserSpeechLocale);
    } else {
      setSpeechLocale(navigator.language, setUserSpeechLocale);
    }

    return speechRecognition;
  };

  const [submitText] = useMutation(SUBMIT_TEXT);
  const captionSubmitText = (
    id: string,
    transcript: string,
    locale: string,
    isFinal: boolean = false,
  ) => {
    // If it's a new sentence
    if (id !== prevIdRef.current) {
      prevIdRef.current = id;
      prevTranscriptRef.current = '';
    }

    const transcriptDiff = diff(prevTranscriptRef.current, transcript);

    let start = 0;
    let end = 0;
    let text = '';
    if (transcriptDiff) {
      start = transcriptDiff.start;
      end = transcriptDiff.end;
      text = transcriptDiff.text;
    }

    // Stores current transcript as previous
    prevTranscriptRef.current = transcript;

    submitText({
      variables: {
        transcriptId: id,
        start,
        end,
        text,
        transcript,
        locale,
        isFinal,
      },
    });
  };

  const throttledTranscriptUpdate = useMemo(() => throttle(
    { interval: THROTTLE_TIMEOUT },
    captionSubmitText,
  ), []);

  const onEnd = useCallback(() => {
    stop();
  }, []);
  const onError = useCallback((event: SpeechRecognitionErrorEvent) => {
    stop();
    logger.error({
      logCode: 'captions_speech_recognition',
      extraInfo: {
        error: event.error,
        message: event.message,
      },
    }, 'Captions speech recognition error');
  }, []);

  const onResult = useCallback((event: SpeechRecognitionEvent) => {
    const {
      resultIndex,
      results,
    } = event;

    const { id } = resultRef.current;

    const { transcript } = results[resultIndex][0];
    const { isFinal } = results[resultIndex];

    resultRef.current.transcript = transcript;
    resultRef.current.isFinal = isFinal;

    if (isFinal) {
      throttledTranscriptUpdate(id, transcript, locale, true);
      resultRef.current.id = generateId();
    } else {
      throttledTranscriptUpdate(id, transcript, locale, false);
    }
  }, [locale]);

  const stop = useCallback(() => {
    idleRef.current = true;
    if (speechRecognitionRef.current) {
      const {
        isFinal,
        transcript,
      } = resultRef.current;

      if (!isFinal) {
        const { id } = resultRef.current;
        updateFinalTranscript(id, transcript, locale);
        speechRecognitionRef.current.abort();
      } else {
        speechRecognitionRef.current.stop();
        speechHasStarted.started = false;
      }
    }
  }, [locale]);

  const start = (settedLocale: string) => {
    if (speechRecognitionRef.current && isLocaleValid(settedLocale)) {
      speechRecognitionRef.current.lang = settedLocale;
      try {
        resultRef.current.id = generateId();
        speechRecognitionRef.current.start();
        idleRef.current = false;
      } catch (event: unknown) {
        onError(event as SpeechRecognitionErrorEvent);
      }
    }
  };

  useEffect(() => {
    speechRecognitionRef.current = initSpeechRecognition();
  }, []);

  useEffect(() => {
    if (speechRecognitionRef.current) {
      speechRecognitionRef.current.onend = () => onEnd();
      speechRecognitionRef.current.onerror = (event: SpeechRecognitionErrorEvent) => onError(event);
      speechRecognitionRef.current.onresult = (event: SpeechRecognitionEvent) => onResult(event);
    }
  }, [speechRecognitionRef.current]);

  const connectedRef = useRef(connected);
  const localeRef = useRef(locale);
  useEffect(() => {
    // Connected
    if (!connectedRef.current && connected) {
      start(locale);
      connectedRef.current = connected;
    } else if (connectedRef.current && !connected) {
      // Disconnected
      stop();
      connectedRef.current = connected;
    } else if (localeRef.current !== locale) {
      // Locale changed
      if (connectedRef.current && connected) {
        stop();
        start(locale);
        localeRef.current = locale;
      }
    }
  }, [connected, locale]);

  return null;
};

const AudioCaptionsSpeechContainer: React.FC = () => {
  /* eslint no-underscore-dangle: 0 */
  // @ts-ignore - temporary while hybrid (Meteor + GraphQL)
  const isConnected = useReactiveVar(AudioManager._isConnected.value) as boolean;

  const {
    data: currentUser,
  } = useCurrentUser(
    (user) => ({
      speechLocale: user.speechLocale,
      voice: user.voice,
    }),
  );

  if (!currentUser) return null;

  return (
    <AudioCaptionsSpeech
      locale={currentUser.speechLocale ?? ''}
      connected={isConnected}
    />
  );
};

export default AudioCaptionsSpeechContainer;
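
The interim/final pipeline ships deltas rather than whole transcripts: diff(prev, next) from @mconf/bbb-diff yields the changed span, and a receiver can splice it into its stored copy. A sketch of that splice, assuming diff returns { start, end, text } for the replaced range (the shape destructured above) and a falsy value when nothing changed:

type TranscriptDiff = { start: number; end: number; text: string };

// Apply a delta the way a consumer of captionSubmitText's variables could:
// replace [start, end) of the previous transcript with the new text.
const applyDiff = (prev: string, d: TranscriptDiff | null): string => (
  d ? prev.slice(0, d.start) + d.text + prev.slice(d.end) : prev
);

// Illustrative values: prev = 'hello wor', next = 'hello world'
// diff(prev, next) ~ { start: 9, end: 9, text: 'ld' }
// applyDiff(prev, { start: 9, end: 9, text: 'ld' }) === 'hello world'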
@@ -0,0 +1,27 @@
import { gql } from '@apollo/client';

export const SUBMIT_TEXT = gql`
  mutation SubmitText(
    $transcriptId: String!
    $start: Int!
    $end: Int!
    $text: String!
    $transcript: String!
    $locale: String!
    $isFinal: Boolean!
  ) {
    captionSubmitText(
      transcriptId: $transcriptId,
      start: $start,
      end: $end,
      text: $text,
      transcript: $transcript,
      locale: $locale,
      isFinal: $isFinal,
    )
  }
`;

export default {
  SUBMIT_TEXT,
};
@@ -0,0 +1,100 @@
import { Meteor } from 'meteor/meteor';
import { isAudioTranscriptionEnabled } from '../service';
import Auth from '/imports/ui/services/auth';
import deviceInfo from '/imports/utils/deviceInfo';
import { unique } from 'radash';
// @ts-ignore - bbb-diff is not typed
import { diff } from '@mconf/bbb-diff';
import { Session } from 'meteor/session';
import { throttle } from '/imports/utils/throttle';
import { makeCall } from '/imports/ui/services/api';

const CONFIG = Meteor.settings.public.app.audioCaptions;
const LANGUAGES = CONFIG.language.available;
const VALID_ENVIRONMENT = !deviceInfo.isMobile || CONFIG.mobile;
const THROTTLE_TIMEOUT = 2000;
// Reason: SpeechRecognition is not in window type definition
// Fix based on: https://stackoverflow.com/questions/41740683/speechrecognition-and-speechsynthesis-in-typescript
/* eslint @typescript-eslint/no-explicit-any: 0 */
export const SpeechRecognitionAPI = (window as any).SpeechRecognition
  || (window as any).webkitSpeechRecognition;

export const generateId = () => `${Auth.userID}-${Date.now()}`;

export const hasSpeechRecognitionSupport = () => typeof SpeechRecognitionAPI !== 'undefined'
  && typeof window.speechSynthesis !== 'undefined'
  && VALID_ENVIRONMENT;

export const setSpeechVoices = () => {
  if (!hasSpeechRecognitionSupport()) return;

  Session.set('speechVoices', unique(window.speechSynthesis.getVoices().map((v) => v.lang)));
};

export const useFixedLocale = () => isAudioTranscriptionEnabled() && CONFIG.language.forceLocale;

export const localeAsDefaultSelected = () => CONFIG.language.defaultSelectLocale;

export const getLocale = () => {
  const { locale } = CONFIG.language;
  if (locale === 'browserLanguage') return navigator.language;
  if (locale === 'disabled') return '';
  return locale;
};

let prevId: string = '';
let prevTranscript: string = '';
const updateTranscript = (
  id: string,
  transcript: string,
  locale: string,
  isFinal: boolean,
) => {
  // If it's a new sentence
  if (id !== prevId) {
    prevId = id;
    prevTranscript = '';
  }

  const transcriptDiff = diff(prevTranscript, transcript);

  let start = 0;
  let end = 0;
  let text = '';
  if (transcriptDiff) {
    start = transcriptDiff.start;
    end = transcriptDiff.end;
    text = transcriptDiff.text;
  }

  // Stores current transcript as previous
  prevTranscript = transcript;

  makeCall('updateTranscript', id, start, end, text, transcript, locale, isFinal);
};

const throttledTranscriptUpdate = throttle(updateTranscript, THROTTLE_TIMEOUT, {
  leading: false,
  trailing: true,
});

export const updateInterimTranscript = (id: string, transcript: string, locale: string) => {
  throttledTranscriptUpdate(id, transcript, locale, false);
};

export const updateFinalTranscript = (id: string, transcript: string, locale: string) => {
  throttledTranscriptUpdate.cancel();
  updateTranscript(id, transcript, locale, true);
};

export const isLocaleValid = (locale: string) => LANGUAGES.includes(locale);

export default {
  generateId,
  getLocale,
  localeAsDefaultSelected,
  useFixedLocale,
  setSpeechVoices,
  hasSpeechRecognitionSupport,
  isLocaleValid,
};
@@ -154,7 +154,9 @@ const CaptionsButton = ({
   };

   const toggleTranscription = () => {
-    SpeechService.setSpeechLocale(isTranscriptionDisabled() ? selectedLocale.current : DISABLED, setUserSpeechLocale);
+    SpeechService.setSpeechLocale(
+      isTranscriptionDisabled() ? selectedLocale.current : DISABLED, setUserSpeechLocale,
+    );
   };

   const getAvailableLocalesList = () => (
@@ -4,10 +4,11 @@ import Service from '/imports/ui/components/audio/captions/service';
 import Button from './component';
 import SpeechService from '/imports/ui/components/audio/captions/speech/service';
 import AudioService from '/imports/ui/components/audio/service';
+import AudioCaptionsButtonContainer from '../../audio-graphql/audio-captions/button/component';

 const Container = (props) => <Button {...props} />;

-export default withTracker(() => {
+withTracker(() => {
   const isRTL = document.documentElement.getAttribute('dir') === 'rtl';
   const availableVoices = SpeechService.getSpeechVoices();
   const currentSpeechLocale = SpeechService.getSpeechLocale();
@@ -23,3 +24,5 @@ export default withTracker(() => {
     isVoiceUser,
   };
 })(Container);
+
+export default AudioCaptionsButtonContainer;
@@ -2,10 +2,11 @@ import React from 'react';
 import { withTracker } from 'meteor/react-meteor-data';
 import Service from '/imports/ui/components/audio/captions/service';
 import LiveCaptions from './component';
+import AudioCaptionsLiveContainer from '../../audio-graphql/audio-captions/live/component';

 const Container = (props) => <LiveCaptions {...props} />;

-export default withTracker(() => {
+withTracker(() => {
   const {
     transcriptId,
     transcript,
@@ -16,3 +17,5 @@ export default withTracker(() => {
     transcriptId,
   };
 })(Container);
+
+export default AudioCaptionsLiveContainer;
@@ -2,11 +2,14 @@ import React from 'react';
 import { withTracker } from 'meteor/react-meteor-data';
 import Service from '/imports/ui/components/audio/captions/speech/service';
 import Select from './component';
+import AudioCaptionsSelectContainer from '../../audio-graphql/audio-captions/captions/component';

 const Container = (props) => <Select {...props} />;

-export default withTracker(() => ({
+withTracker(() => ({
   enabled: Service.isEnabled(),
   locale: Service.getSpeechLocale(),
   voices: Service.getSpeechVoices(),
 }))(Container);
+
+export default AudioCaptionsSelectContainer;
@@ -4,6 +4,7 @@ import { useMutation } from '@apollo/client';
 import { diff } from '@mconf/bbb-diff';
 import Service from './service';
 import Speech from './component';
+import AudioCaptionsSpeechContainer from '../../audio-graphql/audio-captions/speech/component';
 import { SET_SPEECH_LOCALE } from '/imports/ui/core/graphql/mutations/userMutations';
 import { SUBMIT_TEXT } from './mutations';

@@ -66,7 +67,7 @@ const Container = (props) => {
   );
 };

-export default withTracker(() => {
+withTracker(() => {
   const {
     locale,
     connected,
@@ -79,3 +80,5 @@ export default withTracker(() => {
     talking,
   };
 })(Container);
+
+export default AudioCaptionsSpeechContainer;
@@ -1,6 +1,6 @@
 import { gql } from '@apollo/client';

-const CURRENT_USER_SUBSCRIPTION = gql`
+export const CURRENT_USER_SUBSCRIPTION = gql`
 subscription userCurrentSubscription {
   user_current {
     authed
@@ -46,6 +46,7 @@ subscription userCurrentSubscription {
     registeredOn
     role
     userId
+    speechLocale
     voice {
       joined
       muted
@@ -85,6 +85,14 @@ const MEETING_SUBSCRIPTION = gql`
       stoppedSharingAt
       updatedAt
     }
+    componentsFlags {
+      hasCaption
+      hasBreakoutRoom
+      hasExternalVideo
+      hasPoll
+      hasScreenshare
+      hasTimer
+    }
   }
 }
 `;
@@ -0,0 +1,7 @@
import createUseLocalState from './createUseLocalState';

const initialAudioCaptionEnable: boolean = false;
const [useAudioCaptionEnable, setAudioCaptionEnable] = createUseLocalState<boolean>(initialAudioCaptionEnable);

export default useAudioCaptionEnable;
export { setAudioCaptionEnable };
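
createUseLocalState itself is not part of this diff; a plausible shape for such a factory, sketched with Apollo's makeVar/useReactiveVar (an assumption, not the repository's implementation), would satisfy both usages seen in this commit: const [active] = useAudioCaptionEnable() inside components, and setAudioCaptionEnable(value) from plain modules such as service.ts.

import { makeVar, useReactiveVar } from '@apollo/client';

// Hypothetical sketch of the factory: a reactive hook plus a setter
// that also works outside React.
function createUseLocalState<T>(initial: T): [() => [T, (v: T) => void], (v: T) => void] {
  const localState = makeVar<T>(initial);
  const setLocalState = (value: T) => { localState(value); };
  const useLocalState = (): [T, (v: T) => void] => [useReactiveVar(localState), setLocalState];
  return [useLocalState, setLocalState];
}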
@@ -79,7 +79,7 @@ class AudioManager {
     isHangingUp: makeVar(false),
     isListenOnly: makeVar(false),
     isEchoTest: makeVar(false),
-    isTalking: false,
+    isTalking: makeVar(false),
     isWaitingPermissions: false,
     error: null,
     muteHandle: null,
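
Promoting isTalking to makeVar(false) makes it observable from React, matching the neighboring fields; the speech container above already reads a sibling var via useReactiveVar(AudioManager._isConnected.value). A condensed sketch of the consumption pattern (the component is illustrative):

import React from 'react';
import { makeVar, useReactiveVar } from '@apollo/client';

const isTalkingVar = makeVar(false);

// Writes such as isTalkingVar(true) re-render every subscribed reader.
const TalkingIndicator: React.FC = () => {
  const isTalking = useReactiveVar(isTalkingVar);
  return <span>{isTalking ? 'talking' : 'silent'}</span>;
};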