
I would like to use the Twilio `<Stream>` verb to send audio to Azure's continuous speech-to-text API to do real-time transcription on a call. I've successfully used the code below to send an audio file to Azure Speech to Text, but when I push the Twilio stream data I get no transcription results. What am I doing wrong?
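
For reference, the stream is started with TwiML along these lines (the URL is a placeholder for wherever this server is hosted):

<Response>
  <Start>
    <Stream url="wss://example.com/media" />
  </Start>
</Response>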

#!/usr/bin/env node
const WebSocket = require("ws");
const fs = require("fs");
const mulaw = require("mulaw-js");
const base64 = require("js-base64");
const express = require("express");
const app = express();
const server = require("http").createServer(app);
const wss = new WebSocket.Server({ server });
const subscriptionKey = "<subscription key redacted>";
const serviceRegion = "eastus"; // e.g., "westus"
const language = "en-US";

const sdk = require("microsoft-cognitiveservices-speech-sdk");
const stream = require("stream");
const azurePusher = sdk.AudioInputStream.createPushStream(sdk.AudioStreamFormat.getWaveFormatPCM(8000, 16, 1));
const audioConfig = sdk.AudioConfig.fromStreamInput(azurePusher);
//const audioConfig = sdk.AudioConfig.fromDefaultSpeakerOutput();
//const audioConfig = sdk.AudioConfig.fromWavFileInput(fs.readFileSync("C:\\Users\\kenar\\Downloads\\ACCDownload_20210904094910\\Audio\\MFA IDMission Demo Audio - 1.wav"))
const speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey,serviceRegion);

speechConfig.speechRecognitionLanguage = language;
let recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

recognizer.recognizing = (s, e) => {
    console.log(`RECOGNIZING: Text=${e.result.text}`);
};

recognizer.recognized = (s, e) => {
    if (e.result.reason == sdk.ResultReason.RecognizedSpeech) {
        console.log(`RECOGNIZED: Text=${e.result.text}`);
    }
    else if (e.result.reason == sdk.ResultReason.NoMatch) {
        console.log("NOMATCH: Speech could not be recognized.");
    }
};

recognizer.canceled = (s, e) => {
    console.log(`CANCELED: Reason=${e.reason}`);

    if (e.reason == sdk.CancellationReason.Error) {
        console.log(`"CANCELED: ErrorCode=${e.errorCode}`);
        console.log(`"CANCELED: ErrorDetails=${e.errorDetails}`);
        console.log("CANCELED: Did you update the key and location/region info?");
    }

    recognizer.stopContinuousRecognitionAsync();
};

recognizer.sessionStopped = (s, e) => {
    console.log("\n    Session stopped event.");
    recognizer.stopContinuousRecognitionAsync();
};

recognizer.startContinuousRecognitionAsync(() => {
    console.log("Continuous Reco Started");
},
    err => {
        console.trace("err - " + err);
        recognizer.close();
        recognizer = undefined;
    });

// Handle Web Socket Connection
wss.on("connection", function connection(ws) {
    console.log("New Connection Initiated");
    let recostream = null;

    ws.on("message", function incoming(message) {
        const msg = JSON.parse(message);
        switch (msg.event) {
            case "connected":
                console.log(`A new call has connected.`);

                break;
            case "start":
                console.log(`Starting Media Stream ${msg.streamSid}`);
                break;
            case "media":
                process.stdout.write(msg.media.payload + " " + " bytes\033[0G");
                streampayload = base64.decode(msg.media.payload);
                let data = Buffer.from(streampayload);
                azurePusher.write(mulaw.decode(data));
                break;
            case "stop":
                console.log(`Call Has Ended`);
                azurePusher.close();
                recognizer.stopContinuousRecognitionAsync();
            break;
        }
    });

});

server.listen(8080, () => console.log("Listening at Port 8080"));

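For reference, the messages Twilio sends over this socket look roughly like this (abridged; the sids are placeholders):

{ "event": "connected", "protocol": "Call", "version": "1.0.0" }
{ "event": "start", "streamSid": "MZ...", "start": { "mediaFormat": { "encoding": "audio/x-mulaw", "sampleRate": 8000, "channels": 1 } } }
{ "event": "media", "streamSid": "MZ...", "media": { "payload": "<base64-encoded mulaw bytes>" } }
{ "event": "stop", "streamSid": "MZ..." }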

Here are the results after running with the attached audio:

"C:\Program Files\nodejs\node.exe" C:\Users\kenar\WebstormProjects\twiliostreams1\twiliostream.js
Listening at Port 8080
Continuous Reco Started
New Connection Initiated
A new call has connected.
Starting Media Stream MZ8dc3ec47f7b9bd3b37e1b4896beb354e
RECOGNIZED: Text=
Call Has Ended
RECOGNIZED: Text=
NOMATCH: Speech could not be recognized.

Session stopped event.
  • Please trim your code to make it easier to find your problem. Follow these guidelines to create a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). – Community Oct 07 '21 at 09:16

1 Answer


First, a word of caution: you should never post your subscription keys anywhere public! Anyone can grab your key and start using the Azure Speech service, and you would be billed for their usage. I recommend that you immediately:

  • Regenerate your subscription keys in Azure
  • Edit your post above to remove the subscription key information

I was able to get the mulaw-to-PCM conversion working with an additional step. Here's a simplified version of the code that uses a sample mulaw file and recognizeOnceAsync:

const sdk = require("microsoft-cognitiveservices-speech-sdk");
const fs = require("fs");
const alawmulaw = require("alawmulaw");

const language = "en-US";

// Placeholders - use the same key and region as in the question
const subscriptionKey = "<your-subscription-key>";
const serviceRegion = "<your-region>"; // e.g., "eastus"

// NOTE: Since I am reading from a mulaw file, it will include a wave file header. Assuming a basic
//       header with no additions, this will be 44 bytes long.
//       Twilio will probably *not* include this wave file header, in which case you should set this
//       value to 0.
const waveHeaderSize = 44;

var pushStream = sdk.AudioInputStream.createPushStream(sdk.AudioStreamFormat.getWaveFormatPCM(16000, 16, 1));

fs.createReadStream('c:\\temp\\short.mulaw', { start: waveHeaderSize })
    .on('data', function(arrayBuffer) {
        // This returns an Int16Array
        let rawPcm = alawmulaw.mulaw.decode(arrayBuffer);

        // Let's change our view of this data to instead be a Uint8Array
        // CAUTION:
        //      This will work on systems with a Little Endian architecture (the more
        //      common one). If your system is Big Endian, you will probably need to
        //      manually convert to Little Endian encoded Int16 values since that is
        //      the format the Cognitive Speech service expects
        let uintView = new Uint8Array(rawPcm.buffer);
        
        pushStream.write(uintView);
    })
    .on('end', function() {
        pushStream.close();
    });

const audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

const speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
speechConfig.speechRecognitionLanguage = language;

const recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

recognizer.recognizeOnceAsync(
    result =>
    {
        console.log(result);
        recognizer.close();
    },
    error =>
    {
        console.log(error);
        recognizer.close();
    });

A few things to note:

  • Twilio will most likely not include a wave file header, so you should set waveHeaderSize to 0 in the sample code above
  • The code above assumes that you get multiple samples at a time. If Twilio is sending you individual samples, you should use the alawmulaw.mulaw.decodeSample function instead and create the needed Uint8Array from that (a sketch wiring the conversion into the Twilio handler follows these notes). For example (untested):
    let sample = alawmulaw.mulaw.decodeSample(sampleFromTwilio)
    var buff = Buffer.alloc(2);
    buff.writeInt16LE(sample);
    pushStream.write(buff.buffer);
    
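Putting the two pieces together, an untested sketch of the "media" case of the Twilio WebSocket handler with this conversion (it assumes the push stream was created with getWaveFormatPCM(8000, 16, 1), since Twilio media streams are 8 kHz mulaw with no wave header):

case "media": {
    // Twilio sends the payload base64-encoded; decode to raw bytes, not to a UTF-8 string
    const mulawBuffer = Buffer.from(msg.media.payload, "base64");
    // Decode mulaw bytes into an Int16Array of PCM samples
    const rawPcm = alawmulaw.mulaw.decode(mulawBuffer);
    // Byte view over the same memory (same Little Endian caveat as above)
    pushStream.write(new Uint8Array(rawPcm.buffer));
    break;
}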
Ralph
  • Yes, thanks - realized my mistake after submitting. – Ken Arakelian Sep 29 '21 at 08:56
  • I got the code to work with Google Speech to Text. I think my problem with Azure Speech to Text has something to do with how I'm converting mulaw to PCM. – Ken Arakelian Sep 29 '21 at 12:49
  • Hi Ken and Ralph, thank you so much. I have a slight variation where I'm not using a file but instead want the stream from a Twilio call to be transcribed. I'm stuck at how to pass the stream. – Rajesh Rajamani Mar 09 '23 at 10:40