
So recently, for a work project, I've been playing around with speech-to-text models, and in particular custom speech-to-text models. With a bit of mixing and matching of examples I've managed to get a test application talking to the normal Bing speech-to-text API. But when I attempt to use it with a Custom Speech Service instance, only the HTTPS URL works. When I use any of the available long-form WebSocket URLs, I get the error An unhandled exception of type 'System.NullReferenceException' occurred in SpeechClient.dll. This is a bit of a problem, as the HTTPS endpoint only supports 2 minutes of transcription, whereas the WebSocket endpoint supports up to 10 minutes.

This page, https://learn.microsoft.com/en-us/azure/cognitive-services/custom-speech-service/customspeech-how-to-topics/cognitive-services-custom-speech-use-endpoint, is what I'm going off of. It says that I should use a WebSocket URL when creating the client, but that leads to the error above.

Here's my test-bed code for trying it out:

using System;
using Microsoft.CognitiveServices.SpeechRecognition;
using System.IO;

namespace ConsoleApp1
{
    class Program
    {
        DataRecognitionClient dataClient;

        static void Main(string[] args)
        {
            Program p = new Program();
            p.Run(args);
        }

        void Run(string[] args)
        {
            try
            {

                // Works
                //this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(SpeechRecognitionMode.LongDictation, "en-US", "Key");

                // Works
                //this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(SpeechRecognitionMode.LongDictation, "en-US",
                //                                                                    "Key", "Key",
                //                                                                    "https://Id.api.cris.ai/ws/cris/speech/recognize/continuous");

                // Doesn't work
                this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(SpeechRecognitionMode.LongDictation, "en-US",
                                                                                    "Key", "Key",
                                                                                    "wss://Id.api.cris.ai/ws/cris/speech/recognize/continuous");

                this.dataClient.AuthenticationUri = "https://westus.api.cognitive.microsoft.com/sts/v1.0/issueToken";

                this.dataClient.OnResponseReceived += this.ResponseHandler;
                this.dataClient.OnConversationError += this.ErrorHandler;
                this.dataClient.OnPartialResponseReceived += this.PartialHandler;

                Console.WriteLine("Starting Transcription");
                this.SendAudioHelper("Audio file path");
                (new System.Threading.ManualResetEvent(false)).WaitOne();
            } catch(Exception e)
            {
                Console.WriteLine(e);
            }

        }

        private void SendAudioHelper(string wavFileName)
        {
            using (FileStream fileStream = new FileStream(wavFileName, FileMode.Open, FileAccess.Read))
            {
                // Note: for wave files, we can just send data from the file right to the server.
                // If you don't have an audio file in wave format, and instead have just
                // raw data (for example audio coming over bluetooth), then before sending up any
                // audio data you must first send a SpeechAudioFormat descriptor to describe
                // the layout and format of your raw audio data, via DataRecognitionClient's SendAudioFormat() method.
                int bytesRead = 0;
                byte[] buffer = new byte[1024];

                try
                {
                    do
                    {
                        // Get more Audio data to send into byte buffer.
                        bytesRead = fileStream.Read(buffer, 0, buffer.Length);

                        // Send audio data to the service.
                        this.dataClient.SendAudio(buffer, bytesRead);
                    }
                    while (bytesRead > 0);
                }
                finally
                {
                    // We are done sending audio.  Final recognition results will arrive in OnResponseReceived event call.
                    this.dataClient.EndAudio();
                }
            }
        }

        void ErrorHandler(object sender, SpeechErrorEventArgs e)
        {
            Console.WriteLine(e.SpeechErrorText);
        }

        void ResponseHandler(object sender, SpeechResponseEventArgs e)
        {
            if(e.PhraseResponse.RecognitionStatus == RecognitionStatus.EndOfDictation || e.PhraseResponse.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout)
            {
                Console.WriteLine("Trnascription Over");
                Console.ReadKey();
                Environment.Exit(0);
            }
            for(int i = 0; i < e.PhraseResponse.Results.Length; i++)
            {
                Console.Write(e.PhraseResponse.Results[i].DisplayText);
            }
            Console.WriteLine();
        }

        void PartialHandler(object sender, PartialSpeechResponseEventArgs e)
        {

        }
    }
}
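
For reference, the comment in SendAudioHelper mentions sending a SpeechAudioFormat descriptor first when streaming raw audio rather than a wave file. I haven't needed that here since I'm reading a wave file, but a minimal sketch of what I understand it to look like is below; the property names follow the old client library, and the 16 kHz, 16-bit mono PCM values are just illustrative assumptions:

SpeechAudioFormat format = new SpeechAudioFormat
{
    EncodingFormat = AudioCompressionType.PCM,
    BitsPerSample = 16,
    ChannelCount = 1,
    SamplesPerSecond = 16000,
    AverageBytesPerSecond = 16000 * 2, // SamplesPerSecond * (BitsPerSample / 8) * ChannelCount
    BlockAlign = 2
};

// Describe the raw audio layout before any SendAudio() calls.
this.dataClient.SendAudioFormat(format);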

Thanks in advance for any help.

2 Answers


So you are probably OK with using HTTPS for now ... we are revisiting the SDKs right now (restructuring/reorganizing). I expect updates in the next couple of months.

Wolfgang


The new Speech SDK supports the Custom Speech Service out of the box. Please also check the RecognitionUsingCustomizedModelAsync() sample in the SDK samples repository for details.
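
A minimal sketch of continuous recognition against a custom model with the new SDK; the subscription key, region, endpoint ID, and file path below are placeholders you need to replace:

using System;
using System.Threading.Tasks;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;

class CustomSpeechExample
{
    static async Task Main()
    {
        // Placeholders: substitute your own key, region, endpoint ID and file.
        var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "westus");
        config.EndpointId = "YourEndpointId"; // deployment ID of the custom model

        using (var audioInput = AudioConfig.FromWavFileInput("Audio file path"))
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            // Print each phrase as it is finalized; continuous recognition
            // handles long audio without the 2-minute HTTPS limit.
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    Console.WriteLine(e.Result.Text);
            };

            var done = new TaskCompletionSource<int>();
            recognizer.SessionStopped += (s, e) => done.TrySetResult(0);
            recognizer.Canceled += (s, e) => done.TrySetResult(0);

            await recognizer.StartContinuousRecognitionAsync();
            await done.Task;
            await recognizer.StopContinuousRecognitionAsync();
        }
    }
}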

Zhou