I've recently started building a Javascript program for Pepper. My goal is to make pepper listen to what people say and either say Hello or make Pepper do an animation based on keyword 'Hello/Animation' in WordRecognized event in Javascript.
As of now,I'm able to show two buttons on the tablet using JavaScript and make Pepper say Hello on one button press and perform animations on another button press. Clicking on the buttons work but I'm not able to make it working for the WordRecognized events by using the Qi Javascript SDK (http://doc.aldebaran.com/2-4/dev/js/index.html ). I went through the link mentioned here and came up with the below code snippet that makes Pepper say Word Detected on hearing the recognized word. Just wondering what else am I missing in the code to make Pepper listen to the words and perform action accordingly?
//Start the Speech Recognition
var asr = session.service('ALSpeechRecognition');
//Define the Vocabulary
vocabulary = ["hello", "dance"];
//Set The Language To English and set the Vocabulary
asr = asr.then( function(asr) { return asr.setLanguage('English') }).then( function(asr){ return asr.setVocabulary(vocabulary, false); } );
console.log("Set the Language to English!");
//Register the Callback function for the Speech REcognition
asr.unsubscribe(); //De-Register if Existing from Before
asr.subscribe();
session.service("ALMemory").then(function (ALMemory) {
ALMemory.subscriber("wordRecognized").then(function (subscriber) {
// subscriber.signal is a signal associated to "wordRecognized"
subscriber.signal.connect(function (state) {
word = state.getData("wordRecognized")[1];
word.then( function() { session.service('ALTextToSpeech').say("A Keyword is Detected!") });
asr.unsubscribe();
}); //subscriber
}); //connect
}); //ALMemory
});