
I have a questionnaire of 6 questions. These are presented through speechSynthesis. After each question, I need to wait for an oral response that I will process before presenting the next question. The code below is my attempt. The code does go through the callback, but how do I process the logic sequentially: 'state question', 'listen', 'state next question', 'listen', and so on?


//..ToDo: Because we need a verbal response for each question,
//..   we need to change the recognition.onresult callback

function processPromptedInteraction(event)
{
    var speechToText = event.results[0][0].transcript;
    if (speechToText.includes('yes'))
    {   /* handle a "yes" answer */   }
    else if (speechToText.includes('no'))
    {   /* handle a "no" answer */   }
    else
    {   /* handle an unrecognized answer */   }
}

var strQuestion = '';
for (var i = 0; i < questions.length; i++) 
{
    recognition.onresult = processPromptedInteraction; //.. Callback function
    strQuestion = questions[i].question;
    say(strQuestion);
}
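
One common way to get the sequential 'state question, listen, state next question, listen' flow is to wrap each listen step in a Promise that resolves on the next recognition result and await it inside the loop. A minimal sketch, reusing the say() helper, the recognition instance, and the questions array from the snippet above (the helper names listenOnce and runQuestionnaire are illustrative, not from the original post):

// Resolve with the transcript of the next recognition result.
function listenOnce() {
    return new Promise(function (resolve) {
        recognition.onresult = function (event) {
            var last = event.results.length - 1;
            resolve(event.results[last][0].transcript);
        };
    });
}

// Ask the questions one by one: say a question, await the answer, repeat.
async function runQuestionnaire() {
    recognition.continuous = true;   // keep the recognizer running across questions
    recognition.start();

    for (var i = 0; i < questions.length; i++) {
        say(questions[i].question);                // state the question
        var speechToText = await listenOnce();     // wait for the oral response
        if (speechToText.includes('yes')) {
            // record "yes" for questions[i]
        } else if (speechToText.includes('no')) {
            // record "no" for questions[i]
        } else {
            // unrecognized answer; the question could be repeated here
        }
    }

    recognition.stop();
}

Since speechSynthesis speaks asynchronously, listening is active while the question is still being spoken; if that becomes a problem, the SpeechSynthesisUtterance onend event can be used to delay listenOnce() until the prompt has finished.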

J.R
  • Note: SpeechRecognition and SpeechSynthesis are working. It is the logic I am struggling with – J.R Apr 07 '19 at 18:07

1 Answer


Events are asynchronous, so your code doesn't wait for the user to answer each question before moving on to the next one. Hope the following solution works for you. Let me know if I'm missing something.

Note: The code may not run here due to browser security. Use the jsfiddle link below to run it. See the browser console for the final result.

https://jsfiddle.net/ajai/L8p4aqtr/

class Questions {
  constructor(questions) {
    this.questions = questions;
    this.currentIndex = 0;
    this.MAX = this.questions.length - 1;

    // answers hash
    this.answers = questions.reduce((hash, q) => {
      hash[q] = '';
      return hash;
    }, {});

    this.initSpeech();
  }

  initSpeech() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

    this.speechSynthesis = window.speechSynthesis;

    this.recognition = new SpeechRecognition();

    this.recognition.continuous = true;
    this.recognition.interimResults = false;

    this.recognition.onresult = this.recognize.bind(this);
  }

  recognize(event) {
    const last = event.results.length - 1;
    const result = event.results[last][0].transcript;

    if (result.includes('yes')) {
      this.setAnswer('Yes');
      this.next();
    } else if (result.includes('no')) {
      this.setAnswer('No');
      this.next();
    } else {
      // ask same question again
      this.say('Can\'t recognize your answer');
      this.ask();
    }
  }

  setAnswer(answer) {
    this.answers[this.questions[this.currentIndex]] = answer;
  }

  start() {
    this.currentIndex = 0;
    this.recognition.start();

    this.ask();

    return this;
  }

  stop() {
    this.recognition.stop();

    this.onComplete && this.onComplete(this.answers);
  }

  ask() {
    const questionToAsk = this.questions[this.currentIndex];
    this.say(questionToAsk);
  }

  say(msg) {
    const synth = new SpeechSynthesisUtterance(msg);
    this.speechSynthesis.speak(synth);
  }

  next() {
    if (this.currentIndex < this.MAX) {
      this.currentIndex++;
      this.ask();
    } else {
      this.stop();
    }
  }

  getAnswers() {
    return this.answers;
  }

  static create(questions) {
    return new Questions(questions);
  }
}

// const q = new Questions(['Question 1?', 'Question 2?', 'Question 3?']);
const q = Questions.create(['Question 1?', 'Question 2?', 'Question 3?']);

q.start().onComplete = function(result) {
  console.log(result);
};
ajai Jothi
  • Very interesting solution. While I am not able to use this yet, since I already have another class with a speechRecognition object, I will see how I can incorporate some of your elements. Will update this post soon. Cheers – J.R Apr 08 '19 at 00:16
  • Thank you, Ajai. I am redesigning my code to accommodate your model. Prototype worked. Now to integrate it into my system that already has a huge speechrecog component. Cheers. – J.R Apr 08 '19 at 13:19
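
Following up on the integration point in the comments above: if another class already owns a SpeechRecognition instance, one possible adjustment to the Questions class (hypothetical, not part of the original answer) is to let the constructor accept that instance and reuse it in initSpeech() instead of always creating a new one:

class Questions {
  // Hypothetical variant: new Questions(questions, existingRecognition)
  constructor(questions, recognition) {
    this.questions = questions;
    this.currentIndex = 0;
    this.MAX = this.questions.length - 1;

    // answers hash
    this.answers = questions.reduce((hash, q) => {
      hash[q] = '';
      return hash;
    }, {});

    this.initSpeech(recognition);
  }

  initSpeech(existingRecognition) {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

    this.speechSynthesis = window.speechSynthesis;

    // Reuse the instance that was passed in, or create a new one.
    this.recognition = existingRecognition || new SpeechRecognition();

    this.recognition.continuous = true;
    this.recognition.interimResults = false;

    this.recognition.onresult = this.recognize.bind(this);
  }

  // recognize(), setAnswer(), start(), stop(), ask(), say(), next(),
  // getAnswers() and create() stay exactly as in the answer above.
}

Note that assigning onresult here replaces any handler the existing component has set, so the two pieces of code would need to agree on who handles results while the questionnaire is running.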