You may be wondering why we are not using simple HTTP or AJAX instead. You could send data to the server via POST, but here we use WebSocket via Socket.IO because sockets are the best solution for bidirectional communication, especially when pushing an event from the server to the browser. With a continuous socket connection, we won't need to reload the browser or keep sending AJAX requests at frequent intervals.

Instantiate Socket.IO in script.js somewhere:

const socket = io();

Then, insert this code where you are listening to the result event from SpeechRecognition:

socket.emit('chat message', text);

Now, let's go back to the Node.js code to receive this text and use AI to reply to the user.
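If you have been following along, you already have an index.js server; for reference, a minimal scaffold for this setup might look like the sketch below. It assumes Express and Socket.IO have been installed with npm install express socket.io, that the front-end files live in a public/ folder, and that the server listens on port 3000; adjust these details to your own project.

'use strict';

// Minimal index.js scaffold (a sketch; file layout, port and middleware are assumptions).
const express = require('express');
const app = express();
const http = require('http').createServer(app);
const io = require('socket.io')(http);

// Serve the front-end files (index.html, script.js, ...) from public/.
app.use(express.static(__dirname + '/public'));

app.get('/', (req, res) => {
  res.sendFile('index.html', { root: __dirname + '/public' });
});

http.listen(3000, () => {
  console.log('Listening on http://localhost:3000');
});

The io instance created here is the one the connection handler further down attaches to.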
To build a quick conversational interface, we will use API.AI, because it provides a free developer account and lets us set up a small-talk system quickly using its web interface and Node.js library.

Use this for reference:

// Initialise the API.AI Node.js client with the access token,
// and keep a session ID so the conversation context stays together.
const apiai = require('apiai')('5afc4bdf601046b39972ff3866cca392');
const APIAI_SESSION_ID = 'chatbot-clvxfh';

Or get your own by visiting the official site (Getting Started) and signing up.
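As a small optional variation (not part of the original tutorial), you could keep the credentials out of the source file and read them from environment variables instead; the variable names below are assumptions.

// Same client setup, but with the credentials supplied via the environment.
// APIAI_TOKEN and APIAI_SESSION_ID are assumed environment variable names.
const apiai = require('apiai')(process.env.APIAI_TOKEN);
const APIAI_SESSION_ID = process.env.APIAI_SESSION_ID || 'chatbot-clvxfh';

You would then start the server with something like APIAI_TOKEN=your-token node index.js.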
Now we will use the server-side Socket.IO to receive the result from the browser.

Once the connection is established and the message is received, use the API.AI APIs to retrieve a reply to the user's message. When API.AI returns the result, use Socket.IO's socket.emit() to send it back to the browser.

io.on('connection', function(socket) {
  socket.on('chat message', (text) => {

    // Get a reply from API.AI
    let apiaiReq = apiai.textRequest(text, {
      sessionId: APIAI_SESSION_ID
    });

    apiaiReq.on('response', (response) => {
      let aiText = response.result.fulfillment.speech;
      socket.emit('bot reply', aiText); // Send the result back to the browser!
    });

    apiaiReq.on('error', (error) => {
      console.log(error);
    });

    apiaiReq.end();

  });
});
Giving Voice to the bot With The SpeechSynthesis Interface:<\/strong><\/h3>\n\n\n\n
function synthVoice(text) {\n const synth = window.speechSynthesis;\n const utterance = new SpeechSynthesisUtterance();\n utterance.text = text;\n synth.speak(utterance);\n }\n<\/pre>\n\n\n\n
'use strict';\n\n const socket = io();\n \n const outputYou = document.querySelector('.output-you');\n const outputBot = document.querySelector('.output-bot');\n \n const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;\n const recognition = new SpeechRecognition();\n \n recognition.lang = 'en-US';\n recognition.interimResults = false;\n recognition.maxAlternatives = 1;\n \n document.querySelector('button').addEventListener('click', () => {\n recognition.start();\n });\n \n recognition.addEventListener('speechstart', () => {\n console.log('Speech has been detected.');\n });\n \n recognition.addEventListener('result', (e) => {\n console.log('Result has been detected.');\n \n let last = e.results.length - 1;\n let text = e.results[last][0].transcript;\n \n outputYou.textContent = text;\n console.log('Confidence: ' + e.results[0][0].confidence);\n \n socket.emit('chat message', text);\n });\n \n recognition.addEventListener('speechend', () => {\n recognition.stop();\n });\n \n recognition.addEventListener('error', (e) => {\n outputBot.textContent = 'Error: ' + e.error;\n });\n \n function synthVoice(text) {\n const synth = window.speechSynthesis;\n const utterance = new SpeechSynthesisUtterance();\n utterance.text = text;\n synth.speak(utterance);\n }\n \n socket.on('bot reply', function(replyText) {\n synthVoice(replyText);\n \n if(replyText == '') replyText = '(No answer...)';\n outputBot.textContent = replyText;\n });\n<\/pre>\n\n\n\n
var APIAI_TOKEN =apiai(\"5afc4bdf601046b39972ff3866cca392\");\n const APIAI_SESSION_ID = \"chatbot-clvxfh\";\n<\/pre>\n\n\n\n
io.on('connection', function(socket) {\n socket.on('chat message', (text) => {\n \n \/\/ Get a reply from API.AI\n \n let apiaiReq = apiai.textRequest(text, {\n sessionId: APIAI_SESSION_ID\n });\n \n apiaiReq.on('response', (response) => {\n let aiText = response.result.fulfillment.speech;\n socket.emit('bot reply', aiText); \/\/ Send the result back to the browser!\n });\n \n apiaiReq.on('error', (error) => {\n console.log(error);\n });\n \n apiaiReq.end();\n \n });\n });\n<\/pre>\n\n\n\n
Giving Voice to the bot With The SpeechSynthesis Interface:<\/strong><\/h3>\n\n\n\n
function synthVoice(text) {\n const synth = window.speechSynthesis;\n const utterance = new SpeechSynthesisUtterance();\n utterance.text = text;\n synth.speak(utterance);\n }\n<\/pre>\n\n\n\n
<p>Putting the pieces together, the complete client-side script looks like this:</p>

<pre>
'use strict';

const socket = io();

const outputYou = document.querySelector('.output-you');
const outputBot = document.querySelector('.output-bot');

// Speech recognition (still prefixed as webkitSpeechRecognition in Chrome)
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognition();

recognition.lang = 'en-US';
recognition.interimResults = false;
recognition.maxAlternatives = 1;

document.querySelector('button').addEventListener('click', () => {
  recognition.start();
});

recognition.addEventListener('speechstart', () => {
  console.log('Speech has been detected.');
});

recognition.addEventListener('result', (e) => {
  console.log('Result has been detected.');

  const last = e.results.length - 1;
  const text = e.results[last][0].transcript;

  outputYou.textContent = text;
  console.log('Confidence: ' + e.results[last][0].confidence);

  // Send the transcript to the server
  socket.emit('chat message', text);
});

recognition.addEventListener('speechend', () => {
  recognition.stop();
});

recognition.addEventListener('error', (e) => {
  outputBot.textContent = 'Error: ' + e.error;
});

// Speech synthesis
function synthVoice(text) {
  const synth = window.speechSynthesis;
  const utterance = new SpeechSynthesisUtterance();
  utterance.text = text;
  synth.speak(utterance);
}

// When the bot replies, speak the text and show it on the page
socket.on('bot reply', function(replyText) {
  synthVoice(replyText);

  if (replyText === '') replyText = '(No answer...)';
  outputBot.textContent = replyText;
});
</pre>
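<p>The script above assumes a browser that implements the Web Speech API. A small defensive variant of the setup, sketched below as an optional addition (it is not part of the original script), degrades to an on-page message instead of throwing:</p>

<pre>
// Hypothetical guard (not in the original script). Without it, calling
// `new SpeechRecognition()` throws a TypeError in browsers that expose
// neither window.SpeechRecognition nor window.webkitSpeechRecognition.
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

if (!SpeechRecognition) {
  document.querySelector('.output-bot').textContent =
    'Sorry, your browser does not support speech recognition.';
} else {
  const recognition = new SpeechRecognition();
  // ... the rest of the recognition setup shown above goes here ...
}
</pre>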
<h3><strong>Setting Up API.AI</strong></h3>
<pre>
// Load the API.AI Node.js SDK and create a client with the client access token.
const apiai = require('apiai')('5afc4bdf601046b39972ff3866cca392');
const APIAI_SESSION_ID = 'chatbot-clvxfh';
</pre>
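<p>Hard-coding the credentials is fine for a quick demo, but you may prefer to read them from the environment so the token stays out of source control. A possible variant (the <code>process.env</code> names below are placeholders I chose, not values from the article):</p>

<pre>
// Possible variant (not from the article): read the API.AI credentials from
// environment variables instead of hard-coding them.
const apiai = require('apiai')(process.env.APIAI_TOKEN);
const APIAI_SESSION_ID = process.env.APIAI_SESSION_ID || 'chatbot-clvxfh';
</pre>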
<pre>
io.on('connection', function(socket) {
  socket.on('chat message', (text) => {

    // Get a reply from API.AI
    let apiaiReq = apiai.textRequest(text, {
      sessionId: APIAI_SESSION_ID
    });

    apiaiReq.on('response', (response) => {
      let aiText = response.result.fulfillment.speech;
      socket.emit('bot reply', aiText); // Send the result back to the browser!
    });

    apiaiReq.on('error', (error) => {
      console.log(error);
    });

    apiaiReq.end();
  });
});
</pre>
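<p>This handler lives in index.js alongside the Express and Socket.IO boilerplate, which this part of the article does not show. The sketch below is a minimal, assumption-laden version of that wiring; the <code>public/</code> folder, the variable names, and the static-file layout are illustrative, while the port matches the localhost:3000 address used to open the app.</p>

<pre>
// Minimal index.js wiring (a sketch; folder and variable names are assumptions,
// not taken from the article).
'use strict';

const express = require('express');
const app = express();
const server = require('http').createServer(app);
const io = require('socket.io')(server);

const apiai = require('apiai')('YOUR_APIAI_CLIENT_ACCESS_TOKEN');
const APIAI_SESSION_ID = 'chatbot-clvxfh';

// Serve the static client files (index.html and the client script shown above).
app.use(express.static(__dirname + '/public'));

io.on('connection', function(socket) {
  socket.on('chat message', (text) => {
    // ... the API.AI request/response handler shown above goes here ...
  });
});

server.listen(3000, () => {
  console.log('Listening on http://localhost:3000');
});
</pre>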