Google Speech API 无法在 Node.js 中正常工作

我在 Node.js 中使用 Google Cloud Speech API。当我发送一个本地音频文件时,它能正常返回识别结果;但当我尝试发送实时音频流时,它在一秒钟内就停止了。有人能帮我解决这个问题吗?

我正在使用这个文件: recognition.js

  1. 这是我将音频文件传递给 Google 语音 API 时的代码,这里它工作正常。我把存储在 fileName 中的音频文件发送给 Google 语音 API。

    // Reads a local LINEAR16 WAV file, sends it to the Google Cloud Speech
    // API (synchronous recognize), and logs the transcription.
    function sample() {
      // NOTE(review): keyFilename must point at a valid service-account
      // JSON key, and projectId must be the real project id — verify both.
      const speech = require('@google-cloud/speech')({
        projectId: 'project id',
        keyFilename: 'Speech to text-a5ff6058e586.json'
      });
      const fs = require('fs');

      // Local 16 kHz, 16-bit PCM WAV file to transcribe.
      const fileName = 'C:/Users/nap1225/Downloads/audio-files/audio_001.wav';

      // Read the audio file and base64-encode it for the `content` field.
      const audioBytes = fs.readFileSync(fileName).toString('base64');

      const request = {
        audio: { content: audioBytes },
        config: {
          encoding: 'LINEAR16',
          sampleRateHertz: 16000,
          languageCode: 'en-US',
          // FIX: speechContexts must be an ARRAY of SpeechContext objects,
          // not a single object with a "phrases" key.
          speechContexts: [{ phrases: ['refsum'] }]
        }
      };

      speech.recognize(request)
        .then((results) => {
          // FIX: longer audio comes back as several results — join every
          // result's top alternative instead of printing only the first.
          const transcription = results[0].results
            .map((result) => result.alternatives[0].transcript)
            .join('\n');
          console.log(`Transcription: `, transcription);
        })
        .catch((err) => {
          console.error('ERROR:', err);
        });
    }
    sample();

    2. 这里是我发送实时音频流的代码。当我执行这段代码时,它只识别了大约一秒钟就停止了。

     // Streams live microphone audio to the Google Cloud Speech API and
     // prints interim + final transcripts until the streaming limit is hit.
     function streamingMicRecognize () {
       // [START speech_streaming_mic_recognize]
       const record = require('node-record-lpcm16');
       // NOTE(review): keyFilename must point at a valid service-account
       // JSON key, and projectId must be the real project id — verify both.
       const speech = require('@google-cloud/speech')({
         projectId: 'project id',
         keyFilename: 'Speech to text-a5ff6058e586.json'
       });

       // The encoding of the audio, eg 'LINEAR16' (raw 16-bit PCM).
       const encoding = 'LINEAR16';
       // The sample rate in hertz — must match what the recorder produces.
       const sampleRateHertz = 16000;
       // The BCP-47 language code to use, eg 'en-US'.
       const languageCode = 'en-US';

       const request = {
         config: {
           encoding: encoding,
           sampleRateHertz: sampleRateHertz,
           languageCode: languageCode
         },
         interimResults: true,  // stream partial hypotheses as they arrive
         singleUtterance: false // keep the stream open across pauses
       };

       // Create a recognize stream.
       const recognizeStream = speech.streamingRecognize(request)
         .on('error', console.error)
         .on('data', (data) =>
           process.stdout.write(
             (data.results[0] && data.results[0].alternatives[0])
               ? `Transcription: ${data.results[0].alternatives[0].transcript}\n`
               : `\n\nReached transcription time limit, press Ctrl+C\n`));

       // Start recording and pipe the microphone input to the Speech API.
       record
         .start({
           sampleRateHertz: sampleRateHertz,
           // FIX: a non-zero threshold makes the recorder's silence
           // detection stop the recording almost immediately after it
           // starts — that is why the stream died after about a second.
           // 0 disables the silence gate so recording runs continuously.
           threshold: 0,
           // Other options, see https://www.npmjs.com/package/node-record-lpcm16#options
           verbose: false,
           recordProgram: 'sox', // Try also "arecord" or "sox"
           silence: '10.0',
           device: 'plughw:0'    // device : settingsRecord.audio_input
         })
         .on('error', console.error)
         .pipe(recognizeStream);

       console.log('Listening, press Ctrl+C to stop.');
       // [END speech_streaming_mic_recognize]
     }
     streamingMicRecognize();