diff --git a/lib/text2speech.js b/lib/text2speech.js
index f0d50bc..c36f8a5 100644
--- a/lib/text2speech.js
+++ b/lib/text2speech.js
@@ -85,6 +85,82 @@ function Text2Speech(adapter, libs, options, sayIt) {
         callback(error, text, language, volume, seconds);
     }
 
+    /*
+     Baidu TTS (tsn.baidu.com/text2audio) request parameters:
+
+     tex  required  Text to synthesize, UTF-8 encoded; fewer than 2048 Chinese characters or English letters/digits (after conversion to GBK on Baidu's servers the text must be shorter than 4096 bytes).
+     tok  required  Developer access_token obtained from the open platform (see the "authentication mechanism" section of the Baidu docs).
+     cuid required  Unique user identifier, used for UV statistics. A machine MAC address or IMEI that distinguishes the user is recommended; at most 60 characters.
+     ctp  required  Client type; web clients send the fixed value 1.
+     lan  required  Fixed value zh. Language selection; currently only the mixed Chinese/English mode exists, so always send zh.
+     spd  optional  Speed, 0-15, default 5 (medium).
+     pit  optional  Pitch, 0-15, default 5 (medium).
+     vol  optional  Volume, 0-15, default 5 (medium).
+     per  optional  Voice: 0 standard female (default), 1 standard male, 3 emotional synthesis "Du Xiaoyao", 4 emotional synthesis "Du Yaya".
+     aue  optional  3 = mp3 (default); 4 = pcm-16k; 5 = pcm-8k; 6 = wav (same content as pcm-16k). Note: aue=4 or 6 are the formats required by speech recognition, but synthesized audio is not natural speech, so recognition accuracy will suffer.
+
+     Token endpoint:
+     https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=QWM8zjGEEM2LfbGfQ2kn6pRA&client_secret=WhzukkSKnFagOZy3t4pD14bMvmmWiLOn
+    */
+    function sayItGetSpeechBaidu(text, language, volume, callback) {
+        const client_id     = 'QWM8zjGEEM2LfbGfQ2kn6pRA';
+        const client_secret = 'WhzukkSKnFagOZy3t4pD14bMvmmWiLOn';
+        // OAuth token endpoint (not used yet: the access token below is hard-coded and will expire)
+        const url  = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + client_id + '&client_secret=' + client_secret;
+        const cuid = '1234ABCD_9876';
+        const tok  = '24.a15e4c6d4d0a37a931db986c3fe17660.2592000.1544513699.282335-10607289';
+
+        const options = {
+            host: 'tsn.baidu.com',
+            //port: 443,
+            path: '/text2audio?lan=zh&ctp=1&cuid=' + cuid + '&tok=' + tok + '&tex=' + encodeURIComponent(text) + '&vol=9&per=0&spd=5&pit=5&aue=3'
+        };
+
+        if (!libs.https) libs.https = require('https');
+        if (!libs.fs)    libs.fs    = require('fs');
+
+        let sounddata = '';
+
+        libs.https.get(options, res => {
+            res.setEncoding('binary');
+
+            res.on('data', chunk => sounddata += chunk);
+
+            res.on('end', () => {
+                if (sounddata.length < 100) {
+                    if (callback) callback('Cannot get file: received file is too short', text, language, volume, 0);
+                    return;
+                }
+
+                if (sounddata.toString().indexOf('302 Moved') !== -1) {
+                    if (callback) callback('http://' + options.host + options.path + '\nCannot get file: ' + sounddata, text, language, volume, 0);
+                    return;
+                }
+
+                libs.fs.writeFile(MP3FILE, sounddata, 'binary', err => {
+                    if (err) {
+                        if (callback) callback('File error: ' + err, text, language, volume, 0);
+                    } else {
+                        that.getLength(MP3FILE, (error, seconds) => {
+                            if (callback) callback(error, text, language, volume, seconds);
+                        });
+                    }
+                });
+            });
+        }).on('error', err => {
+            sounddata = '';
+            if (callback) callback('Cannot get file: ' + err, text, language, volume, 0);
+        });
+    }
+
     function sayItGetSpeechGoogle(text, language, volume, callback) {
         if (typeof volume === 'function') {
             callback = volume;
@@ -432,6 +508,7 @@ function Text2Speech(adapter, libs, options, sayIt) {
     };
 
     const ENGINES = {
+        'baidu': sayItGetSpeechBaidu,
         'google': sayItGetSpeechGoogle,
         'yandex': sayItGetSpeechYandex,
        'acapela': sayItGetSpeechAcapela,
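
Note on the token handling, outside the diff itself: the header comment documents Baidu's OAuth token endpoint, but sayItGetSpeechBaidu only builds the token URL without ever requesting it and instead ships a hard-coded access token, which will stop working once that token expires. Below is a minimal sketch of how the token could be fetched at runtime via the client_credentials grant. The helper name getBaiduToken is hypothetical, and the access_token / expires_in fields are assumed from the standard OAuth 2.0 token response, not taken from this repository.

    // Hypothetical helper: fetch a fresh Baidu access token instead of hard-coding `tok`.
    // Assumes the endpoint returns JSON with `access_token` and `expires_in` (standard OAuth 2.0).
    const https = require('https');

    function getBaiduToken(clientId, clientSecret, callback) {
        const tokenUrl = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials' +
            '&client_id=' + encodeURIComponent(clientId) +
            '&client_secret=' + encodeURIComponent(clientSecret);

        https.get(tokenUrl, res => {
            let body = '';
            res.on('data', chunk => body += chunk);
            res.on('end', () => {
                try {
                    const data = JSON.parse(body);
                    if (data.access_token) {
                        // expires_in is in seconds; cache the token and refresh it before it runs out
                        callback(null, data.access_token, data.expires_in);
                    } else {
                        callback(new Error('No access_token in response: ' + body));
                    }
                } catch (e) {
                    callback(e);
                }
            });
        }).on('error', err => callback(err));
    }

    // Usage sketch: fetch the token once, then pass it on to the text2audio request.
    getBaiduToken('<client_id>', '<client_secret>', (err, token, expiresIn) => {
        if (err) return console.error('Token request failed: ' + err);
        console.log('Got token valid for ' + expiresIn + ' seconds: ' + token);
    });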