This code contains the user interface of my banking chatbot. I have used the Web Speech API (as documented on MDN) to implement the speech-to-text feature, and after implementing it I have run into a major bug: as soon as the user starts speech recognition by clicking the "Speak" button, the textarea automatically grows until it covers the Submit button, which prevents the user from submitting a query. I haven't been able to locate the error.
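A quick way to pin down which element is actually growing is to watch the input row with a ResizeObserver from the browser console. This is only a debugging sketch, and it assumes the question field keeps the .msger-input class used in the markup below:

//debug sketch: logs whichever element resizes once recognition starts
new ResizeObserver(function(entries) {
entries.forEach(function(entry) {
console.log("resized:", entry.target, entry.contentRect.width, entry.contentRect.height);
});
}).observe(document.querySelector(".msger-input"));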
//initialize speech recognition API
window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition(); //initialize my instance of speech recognition
recognition.interimResults = true; //return results while still working on current recognition
//this is where your speech-to-text results will appear (this assumes a .words-container element exists in the page)
let p = document.createElement("p")
const words = document.querySelector(".words-container")
words.appendChild(p)
//I want to select and change the color of the body, but this could be any HTML element on your page
let body = document.querySelector("body")
let cap_css_colors = ["AliceBlue","AntiqueWhite","Aqua","Aquamarine","Azure","Beige","Bisque","Black","BlanchedAlmond","Blue","BlueViolet","Brown","BurlyWood","CadetBlue","Chartreuse","Chocolate","Coral","CornflowerBlue","Cornsilk","Crimson","Cyan","DarkBlue","DarkCyan","DarkGoldenRod","DarkGray","DarkGrey","DarkGreen","DarkKhaki","DarkMagenta","DarkOliveGreen","Darkorange","DarkOrchid","DarkRed","DarkSalmon","DarkSeaGreen","DarkSlateBlue","DarkSlateGray","DarkSlateGrey","DarkTurquoise","DarkViolet","DeepPink","DeepSkyBlue","DimGray","DimGrey","DodgerBlue","FireBrick","FloralWhite","ForestGreen","Fuchsia","Gainsboro","GhostWhite","Gold","GoldenRod","Gray","Grey","Green","GreenYellow","HoneyDew","HotPink","IndianRed","Indigo","Ivory","Khaki","Lavender","LavenderBlush","LawnGreen","LemonChiffon","LightBlue","LightCoral","LightCyan","LightGoldenRodYellow","LightGray","LightGrey","LightGreen","LightPink","LightSalmon","LightSeaGreen","LightSkyBlue","LightSlateGray","LightSlateGrey","LightSteelBlue","LightYellow","Lime","LimeGreen","Linen","Magenta","Maroon","MediumAquaMarine","MediumBlue","MediumOrchid","MediumPurple","MediumSeaGreen","MediumSlateBlue","MediumSpringGreen","MediumTurquoise","MediumVioletRed","MidnightBlue","MintCream","MistyRose","Moccasin","NavajoWhite","Navy","OldLace","Olive","OliveDrab","Orange","OrangeRed","Orchid","PaleGoldenRod","PaleGreen","PaleTurquoise","PaleVioletRed","PapayaWhip","PeachPuff","Peru","Pink","Plum","PowderBlue","Purple","Red","RosyBrown","RoyalBlue","SaddleBrown","Salmon","SandyBrown","SeaGreen","SeaShell","Sienna","Silver","SkyBlue","SlateBlue","SlateGray","SlateGrey","Snow","SpringGreen","SteelBlue","Tan","Teal","Thistle","Tomato","Turquoise","Violet","Wheat","White","WhiteSmoke","Yellow","YellowGreen"];
const CSS_COLORS = cap_css_colors.map(color => {
//change all color names to lower case, because the string comparison below is case-sensitive
return color.toLowerCase()
})
//once speech recognition determines it has a "result", grab the transcript of each result, join them all, and add the text to the paragraph
recognition.addEventListener("result", e => {
const transcript = Array.from(e.results)
.map(result => result[0])
.map(result => result.transcript)
.join("")
p.innerText = transcript
//once speech recognition determines it has a final result, create a new paragraph and append it to the words-container
//this way a fresh p is ready to hold the next speech-to-text result as soon as the previous one is finished
if (e.results[0].isFinal) {
p = document.createElement("p")
words.appendChild(p)
}
//for each result, map through all color names and check if current result (transcript) contains that color
//i.e. see if a person said any color name you know
CSS_COLORS.forEach(color => {
//if find a match, change your background color to that color
if (transcript.includes(color)) {
body.style.backgroundColor = color;
}
})
})
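//note: with interimResults on, the "result" event fires repeatedly while you speak, so the
//joined transcript grows progressively, e.g. "make" -> "make the" -> "make the background blue"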
//add your functionality to the start and stop buttons
function startRecording() {
recognition.start();
//restart recognition automatically whenever it ends, until the stop button is clicked
recognition.addEventListener("end", recognition.start);
document.getElementById("stop").addEventListener("click", stopRecording);
}
function stopRecording() {
console.log("okay I'll stop");
//remove the auto-restart listener first, otherwise recognition would start right back up
recognition.removeEventListener("end", recognition.start);
recognition.stop();
}
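One related weak spot: new SpeechRecognition() at the top throws outright in browsers that lack the API, so nothing after it runs. Below is a minimal guard that could sit before that constructor call; the .no-browser-support element is an assumption on my part (the stylesheet below hides one by default):

//sketch: run this before calling new SpeechRecognition(), which throws when the API is missing
if (!window.SpeechRecognition && !window.webkitSpeechRecognition) {
//assumes the page contains a fallback message carrying the no-browser-support class
document.querySelector(".no-browser-support").style.display = "block";
}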
ul {
list-style: none;
padding: 0;
}
p {
color: #444;
}
button:focus {
outline: 0;
}
.container {
max-width: 700px;
margin: 0 auto;
padding: 100px 50px;
text-align: center;
}
.container h1 {
margin-bottom: 20px;
}
.page-description {
font-size: 1.1rem;
margin: 0 auto;
}
.tz-link {
font-size: 1em;
color: #1da7da;
text-decoration: none;
}
.no-browser-support {
display: none;
font-size: 1.2rem;
color: #e64427;
margin-top: 35px;
}
.app {
margin: 40px auto;
}
#note-textarea {
margin: 20px 0;
}
#recording-instructions {
margin: 15px auto 60px;
}
#notes {
padding-top: 20px;
}
.note .header {
font-size: 0.9em;
color: #888;
margin-bottom: 10px;
}
.note .delete-note,
.note .listen-note {
text-decoration: none;
margin-left: 15px;
}
.note .content {
margin-bottom: 40px;
}
@media (max-width: 768px) {
.container {
padding: 50px 25px;
}
button {
margin-bottom: 10px;
}
}
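To rule out a purely layout-side cause, one thing worth trying is constraining the input row so a growing field cannot push the Submit button out of view. This is a sketch only, assuming .msger-inputarea is the flex row that wraps the question input and the buttons in the markup below:

.msger-inputarea {
display: flex;
align-items: center;
}
.msger-input {
flex: 1;
min-width: 0; /* keeps a long transcript from forcing the row wider than its container */
max-height: 3em;
overflow-y: auto;
}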
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>MJ BOT</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="stylesheet" href="{{ url_for('static', filename='styles/style.css') }}">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
</head>
<body>
<section class="msger">
<header class="msger-header">
<div class="msger-header-title">
<i class=""></i> MJ Chatbot <i class=""></i>
</div>
</header>
<main class="msger-chat">
<div class="msg left-msg">
<div class="msg-img" style="background-image: url(https://image.flaticon.com/icons/svg/145/145867.svg)"></div>
<div class="msg-bubble">
<div class="msg-info">
<div class="msg-info-name"></div>
</div>
<div class="msg-text">
<p> {{ questionAsked }} </p>
</div>
</div>
</div>
</main>
<article>
<div class="msger-chat">
<div class="msg right-msg">
<div class="msg-img" style="background-image: url(https://image.flaticon.com/icons/svg/327/327779.svg)"></div>
<div class="msg-bubble">
<div class="msg-info">
<div class="msg-info-name"></div>
</div>
<div class="msg-text">
<p> {{ response }}</p>
</div>
</div>
</div>
</div>
</article>
<form id="output" class="msger-inputarea" action="signup" method="post">
<input id="output" class="msger-input" type="text" name="question"></input>
<input id='play' class="msger-send-btn" type="submit" value="Submit Message !" > </input>
<input type="button" value="Speak" onclick="runSpeechRecognition()"></input>
<button id='stop'></button>
</form>
<button id=play style="font-size:24px">Listen <i class="fas fa-file-audio"></i></button>
<a href="{{ url_for('contact')}}">Send Query to Agent !</a>
</section>
<script>
window.onload = function() {
if ('speechSynthesis' in window) {
var playEle = document.querySelector('#play');
var pauseEle = document.querySelector('#pause');
var stopEle = document.querySelector('#stop');
var utterance;
var flag = false;
playEle.addEventListener('click', onClickPlay);
pauseEle.addEventListener('click', onClickPause);
stopEle.addEventListener('click', onClickStop);
function onClickPlay() {
if(!flag){
flag = true;
utterance = new SpeechSynthesisUtterance(document.querySelector('article').textContent);
utterance.voice = speechSynthesis.getVoices()[0];
utterance.onend = function(){
flag = false; playEle.className = pauseEle.className = ''; stopEle.className = 'stopped';
};
playEle.className = 'played';
stopEle.className = '';
speechSynthesis.speak(utterance);
}
if (speechSynthesis.paused) { /* unpause/resume narration */
playEle.className = 'played';
pauseEle.className = '';
speechSynthesis.resume();
}
}
function onClickPause() {
if (speechSynthesis.speaking && !speechSynthesis.paused) { /* pause narration */
pauseEle.className = 'paused';
playEle.className = '';
speechSynthesis.pause();
}
}
function onClickStop() {
if (speechSynthesis.speaking) { /* stop narration; the explicit class reset also covers Safari */
stopEle.className = 'stopped';
playEle.className = pauseEle.className = '';
flag = false;
speechSynthesis.cancel();
}
}
}
else { /* speech synthesis not supported */
var msg = document.createElement('h5');
msg.textContent = "Detected no support for Speech Synthesis";
msg.style.textAlign = 'center';
msg.style.backgroundColor = 'red';
msg.style.color = 'white';
msg.style.marginTop = msg.style.marginBottom = 0;
document.body.insertBefore(msg, document.querySelector('div'));
}
}
</script>
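<!-- note: in Chrome, speechSynthesis.getVoices() can return an empty list until the browser has finished loading the voices, so utterance.voice above may be undefined on the first click; the script below is a common warm-up sketch -->
<script>
//request the voice list once at load; Chrome populates it asynchronously and then fires
//"voiceschanged", after which getVoices() returns the full list
speechSynthesis.getVoices();
speechSynthesis.addEventListener('voiceschanged', function() {
console.log('voices ready:', speechSynthesis.getVoices().length);
});
</script>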
<script>
function runSpeechRecognition() {
// reference to the text input that receives the transcript
var output = document.getElementById("output");
// reference to the status line under the form
var action = document.getElementById("help");
// new speech recognition object (prefixed as webkitSpeechRecognition in Chrome)
var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
var recognition = new SpeechRecognition();
// This runs when the speech recognition service starts
recognition.onstart = function() {
action.innerHTML = "<small>listening, please speak...</small>";
};
recognition.onspeechend = function() {
action.innerHTML = "<small>stopped listening, hope you are done...</small>";
recognition.stop();
}
// This runs when the speech recognition service returns a result
recognition.onresult = function(event) {
var transcript = event.results[0][0].transcript;
var confidence = event.results[0][0].confidence;
// write the transcript into the question input; assigning HTML to the surrounding form here would wipe out its buttons
output.value = transcript;
};
// start recognition
recognition.start();
}
</script>
<script src='https://use.fontawesome.com/releases/v5.0.13/js/all.js'></script>
</body>
</html>