fix(captions): "not supported" in chrome

Fixes a case where the locale selector doesn't show up in Chrome when using
the 'webspeech' provider.
Also adds the missing fields to the webspeech transcription messages,
following the addition of new parameters to those messages in the open
transcription server.
This commit is contained in:
Arthurk12 2023-02-14 16:07:24 -03:00 committed by Lucas Fialho Zawacki
parent eafa0f200e
commit 3b871e5ca2
2 changed files with 9 additions and 7 deletions

View File

@ -3,7 +3,7 @@ import RedisPubSub from '/imports/startup/server/redis';
import { extractCredentials } from '/imports/api/common/server/helpers';
import Logger from '/imports/startup/server/logger';
export default function updateTranscript(transcriptId, start, end, text, transcript, locale) {
export default function updateTranscript(transcriptId, start, end, text, transcript, locale, isFinal) {
try {
const REDIS_CONFIG = Meteor.settings.private.redis;
const CHANNEL = REDIS_CONFIG.channels.toAkkaApps;
@ -19,6 +19,7 @@ export default function updateTranscript(transcriptId, start, end, text, transcr
check(text, String);
check(transcript, String);
check(locale, String);
check(isFinal, Boolean);
// Ignore irrelevant updates
if (start !== -1 && end !== -1) {
@ -29,6 +30,7 @@ export default function updateTranscript(transcriptId, start, end, text, transcr
text,
transcript,
locale,
result: isFinal,
};
RedisPubSub.publishUserMessage(CHANNEL, EVENT_NAME, meetingId, requesterUserId, payload);

View File

@ -81,7 +81,7 @@ const initSpeechRecognition = () => {
let prevId = '';
let prevTranscript = '';
const updateTranscript = (id, transcript, locale) => {
const updateTranscript = (id, transcript, locale, isFinal) => {
// If it's a new sentence
if (id !== prevId) {
prevId = id;
@ -102,7 +102,7 @@ const updateTranscript = (id, transcript, locale) => {
// Stores current transcript as previous
prevTranscript = transcript;
makeCall('updateTranscript', id, start, end, text, transcript, locale);
makeCall('updateTranscript', id, start, end, text, transcript, locale, isFinal);
};
const throttledTranscriptUpdate = throttle(updateTranscript, THROTTLE_TIMEOUT, {
@ -111,12 +111,12 @@ const throttledTranscriptUpdate = throttle(updateTranscript, THROTTLE_TIMEOUT, {
});
const updateInterimTranscript = (id, transcript, locale) => {
throttledTranscriptUpdate(id, transcript, locale);
throttledTranscriptUpdate(id, transcript, locale, false);
};
const updateFinalTranscript = (id, transcript, locale) => {
throttledTranscriptUpdate.cancel();
updateTranscript(id, transcript, locale);
updateTranscript(id, transcript, locale, true);
};
const getSpeechLocale = (userId = Auth.userID) => {
@ -133,7 +133,7 @@ const isLocaleValid = (locale) => LANGUAGES.includes(locale);
const isEnabled = () => isLiveTranscriptionEnabled();
const isWebSpeechApi = () => PROVIDER === 'webspeech' && hasSpeechRecognitionSupport() && hasSpeechLocale();
const isWebSpeechApi = () => PROVIDER === 'webspeech';
const isVosk = () => PROVIDER === 'vosk';
@ -141,7 +141,7 @@ const isWhispering = () => PROVIDER === 'whisper';
const isDeepSpeech = () => PROVIDER === 'deepSpeech'
const isActive = () => isEnabled() && (isWebSpeechApi() || isVosk() || isWhispering() || isDeepSpeech());
const isActive = () => isEnabled() && ((isWebSpeechApi() && hasSpeechLocale()) || isVosk() || isWhispering() || isDeepSpeech());
const getStatus = () => {
const active = isActive();