Hackster is hosting Hackster Holidays, Ep. 6: Livestream & Giveaway Drawing. Watch previous episodes or stream live on Monday!
Yahya Khalid
Created September 4, 2024

Home Assist v0.1

A smart voice assistant that can locate everyday objects through voice commands and auditory feedback, designed for visually impaired individuals.

26

Things used in this project

Story

Read more

Code

Home Assist UNIHIKER jupyter notebook code

Python
Save this in the Jupyter notebook on the UNIHIKER and execute it to run Home Assist. Make sure to resolve all dependencies first.
import time
import json
import speech_recognition as sr
from pinpong.board import Board, Pin
from pinpong.extension.unihiker import *
from unihiker import GUI
import paho.mqtt.client as mqtt

# MQTT configuration
broker = 'broker.mqtt.cool'  # public MQTT broker address (no authentication)
port = 1883  # standard non-TLS MQTT port
topic = '/test/qos0'  # topic the receiving node listens on; replace with your own

def create_json_string(data_string):
    """Wrap *data_string* in a ``{'cmd': ...}`` payload and serialize it to JSON.

    Args:
        data_string: the command text to send (e.g. "find keys").

    Returns:
        A JSON string of the form ``{"cmd": "<data_string>"}``.
    """
    return json.dumps({'cmd': data_string})

# Global client instance
# Module-level paho-mqtt client handle; created in start_mqtt(), torn down in stop_mqtt().
client = None

def on_connect(client, userdata, flags, rc):
    """paho-mqtt ``on_connect`` callback.

    Publishes the pending module-level ``json_data`` payload to ``topic``
    as soon as the broker connection is established.

    Args:
        client: the paho-mqtt client instance that just connected.
        userdata: user data set on the client (unused).
        flags: broker response flags (unused).
        rc: connection result code; 0 means success.
    """
    print(f'Connected with result code {rc}')
    # Note: no `global` statement needed — json_data is only read here,
    # so Python resolves it from module scope automatically.
    client.publish(topic, json_data)
    print(f'Published: {json_data} to topic: {topic}')

def start_mqtt():
    """Create a fresh MQTT client, connect to the broker, and start its network loop."""
    global client
    client = mqtt.Client()
    # The connect callback performs the actual publish once the broker accepts us.
    client.on_connect = on_connect
    # 60-second keepalive interval.
    client.connect(broker, port, 60)
    # Background thread: processes network traffic and dispatches callbacks.
    client.loop_start()

def stop_mqtt():
    """Stop the background network loop and disconnect from the broker, if connected."""
    global client
    if client is None:
        return
    client.loop_stop()
    client.disconnect()

def send_mqtt_data(mqtt_data):
    """Publish *mqtt_data* as a ``{'cmd': ...}`` JSON payload over MQTT.

    Spins up a short-lived MQTT client, waits briefly so the connect
    callback can publish the payload, then tears the client down.

    Args:
        mqtt_data: command string to send (e.g. "find keys").
    """
    global json_data
    json_data = create_json_string(mqtt_data)
    start_mqtt()
    try:
        # Give the background loop time to connect and publish (see on_connect).
        time.sleep(2)
    finally:
        # Always release the client — without this, an exception (e.g.
        # KeyboardInterrupt during the sleep) leaked the loop thread and
        # left the broker connection open.
        stop_mqtt()
    
# Initialize the UNIHIKER
# Starts the pinpong board driver; required before using on-board peripherals (buzzer).
Board().begin()

# Instantiate the GUI class
# Shared GUI object used by all the screen-drawing helpers below (240x320 canvas).
gui = GUI()

def home_screen():
    """Draw the idle screen: clear the display, show the bot image, title and hint."""
    # Wipe the whole 240x320 display to white.
    gui.fill_rect(x=0, y=0, w=240, h=320, color="white")
    # Load and show assistant image.
    gui.draw_image(x=120, y=100, w=120, h=100, image='upload/bot.png', origin='center')
    # Draw the title three times with 1-px horizontal offsets for a pseudo-bold look.
    for dx in (0, -1, 1):
        gui.draw_text(x=25 + dx, y=170, text='Home Assist v0.1', font_size=17, color="green")
    gui.draw_text(x=40, y=220, w=180, text='Say HELLO to wake device and then say       something...', font_size=11, color="grey")
    
def listening_screen():
    """Draw the active-listening screen: clear display, assistant image, status text."""
    gui.fill_rect(x=0, y=0, w=240, h=320, color="white")
    gui.draw_image(x=120, y=100, w=120, h=100, image='upload/assistant.png', origin='center')
    # Same string at x, x+1, x-1 for a pseudo-bold effect.
    for dx in (0, 1, -1):
        gui.draw_text(x=120 + dx, y=190, text="Listening.....", font_size=18, color="black", angle=0, origin="center")

def show_cmd_onscreen(text):
    """Echo the recognized command on screen, quoted, in pseudo-bold green."""
    label = "' " + text + " '"
    # Triple draw with 1-px offsets thickens the glyphs.
    for dx in (0, 1, -1):
        gui.draw_text(x=80 + dx, y=220, text=label, color="green")
    
# Load and show assistant image
#img_image = gui.draw_image(x=120, y=100, w=120, h=100, image='upload/bot.png', origin='center')
#img_image = gui.draw_image(x=120, y=100, w=120, h=100, image='upload/assistant.png', origin='center')
#info_text = gui.draw_text(x=50, y=160, text='Home Assist v0.1')

# Initialize recognizer
# Shared speech_recognition Recognizer used by the main listen loop below.
recognizer = sr.Recognizer()

# Wake word that switches the assistant into command mode.
WAKE_WORD = "hello"


def contains_wake_word(text):
    """Return True if the wake word occurs anywhere in *text* (case-insensitive)."""
    lowered = text.lower()
    return WAKE_WORD in lowered

# Function to listen and process audio
def listen_and_recognize():
    """Main loop: wait for the wake word, then treat the next utterance as a command.

    Flow per recognized utterance:
      * wake word heard  -> beep, show listening screen, arm command mode
      * armed + command  -> handle it ("find keys" publishes over MQTT),
                            echo it on screen, then return to the home screen
      * otherwise        -> keep waiting on the home screen

    Runs forever; unrecognized speech is ignored and the loop continues.
    """
    home_screen()
    wake_up_state = False
    with sr.Microphone() as source:
        print("Listening for the wake word...")
        # Calibrate for ambient noise once, up front. The original called this
        # before every listen(), which blocks ~1 s per loop iteration.
        recognizer.adjust_for_ambient_noise(source)
        while True:
            audio = recognizer.listen(source)

            try:
                # Recognize speech using Google Web Speech API
                text = recognizer.recognize_google(audio)
                print(f"Detected: {text}")

                if contains_wake_word(text):
                    # Wake word heard: beep and switch to command mode.
                    buzzer.pitch(300, 1)
                    print(f"Wake word '{WAKE_WORD}' detected. Listening for commands...")
                    listening_screen()
                    wake_up_state = True
                elif wake_up_state:
                    # First utterance after the wake word is treated as the command.
                    print("already woke up detected a command")
                    wake_up_state = False
                    print(f"command: {text}")
                    # Normalize case/whitespace: the Web Speech API frequently
                    # capitalizes words ("Find keys"), which the original exact
                    # string comparison silently missed.
                    if text.strip().lower() == "find keys":
                        print("found keys")
                        send_mqtt_data(text)
                        buzzer.pitch(200, 1)
                    else:
                        print("something else")
                        buzzer.pitch(200, 1)
                    show_cmd_onscreen(text)
                    # Leave the command on screen briefly before resetting.
                    time.sleep(3)
                    home_screen()
                else:
                    print("Listening for wakeword...")
                    home_screen()

            except sr.UnknownValueError:
                # Speech was unintelligible; keep listening silently.
                pass
            except sr.RequestError as e:
                # Network/API failure talking to the recognition service.
                print(f"Request error: {e}")

# Entry point: start the wake-word/command loop when run as a script.
if __name__ == "__main__":
    listen_and_recognize()

Credits

Yahya Khalid

Yahya Khalid

2 projects • 1 follower

Comments