Hudson Gould
Published under the MIT license

CaneCam - helping the blind cross the street with CV/TinyML

A TinyML-powered computer vision cane that tackles an everyday challenge for the blind: crossing the street.

Beginner · Full instructions provided · 3 hours

Things used in this project

Hardware components

Walking Cane (Generic)
Any walking cane will work
×1
Raspberry Pi 4 Model B
×1
USB Webcam (Generic)
Any type of USB Webcam / Pi Camera works
×1
USB Power Bank (Generic)
×1
5V relay (Generic)
×1
DC motor (generic)
×1
9V battery (generic)
×1
Jumper wires (generic)
×1
Mini Breadboard (Generic)
×1
Small Box (Generic)
Tupperware works great
×1
2x Bolt, nut and washer (Generic)
×1

Software apps and online services

Edge Impulse Studio
Raspberry Pi Raspbian

Hand tools and fabrication machines

Drill / Driver, Cordless

Story


Schematics

Schematic (Ardu for RPi)

A simple diagram showing how the Raspberry Pi connects to a 5V relay, which switches a 9V battery that powers the DC motor.
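Before running the full classifier, it can help to check the relay wiring on its own. The sketch below is a minimal, hypothetical test that assumes the relay's signal line is wired to BCM pin 18, the same pin CaneCam.py uses; it simply pulses the motor a few times.

#relay_test.py - minimal wiring check (assumes the relay signal line is on BCM pin 18, as in CaneCam.py)
import time
import RPi.GPIO as GPIO

MOTOR_PIN = 18 #BCM pin driving the relay's signal input

GPIO.setmode(GPIO.BCM)
GPIO.setup(MOTOR_PIN, GPIO.OUT)

try:
    for _ in range(3):
        GPIO.output(MOTOR_PIN, 1) #relay closes, the 9V battery spins the motor
        time.sleep(1)
        GPIO.output(MOTOR_PIN, 0) #relay opens, the motor stops
        time.sleep(1)
finally:
    GPIO.cleanup() #release the pin when done

If the motor buzzes on and off three times, the relay circuit matches the schematic and CaneCam.py should be able to drive it.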

Code

CaneCam.py

Python
Used to run the CaneCam
#Import all needed libraries
import cv2
import os
import sys, getopt
import signal
import time
from edge_impulse_linux.image import ImageImpulseRunner

#GPIO import to control the relay pin
import RPi.GPIO as GPIO

motorpin = 18 #Sets the pin to control the relay
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(motorpin, GPIO.OUT) #Makes the motorpin an output

runner = None
show_camera = True #set True/False to show or hide the camera preview window
min_conf_threshold = 0.90 #minimum confidence threshold for making a recommendation; adjust to change when a suggestion is made

def now(): #returns time
    return round(time.time() * 1000)

def get_webcams(): #gets the port IDs of the connected webcam(s)
    port_ids = []
    for port in range(5):
        print("Looking for a camera in port %s:" %port)
        camera = cv2.VideoCapture(port)
        if camera.isOpened():
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) found in port %s " %(backendName, w, h, port))
                port_ids.append(port)
            camera.release()
    return port_ids

def sigint_handler(sig, frame): #interrupt handler
    print('Interrupted')
    if (runner):
        runner.stop()
    sys.exit(0)

signal.signal(signal.SIGINT, sigint_handler)

def help(): #help
    print('python CaneCam.py <path_to_model.eim> <Camera port ID, only required when more than 1 camera is present>')

def main(argv):
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) == 0:
        help()
        sys.exit(2)

    model = args[0]
    
    #Creates paths to the directory and modelfile.eim that we downloaded
    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']
            if len(args)>= 2:
                videoCaptureDeviceId = int(args[1]) #Sets the webcam to capture
            else:
                port_ids = get_webcams()
                if len(port_ids) == 0:
                    raise Exception('Cannot find any webcams')
                if len(args)<= 1 and len(port_ids)> 1:
                    raise Exception("Multiple cameras found. Add the camera port ID as a second argument to use to this script")
                videoCaptureDeviceId = int(port_ids[0])
            
            #Sets up the camera
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) in port %s selected." %(backendName, w, h, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")

            next_frame = 0 #used to throttle the loop; see the sleep at the bottom of the loop
            
            rn = now()

            #Workhorse algorithm, this is where the actual classification happens
            for res, img in runner.classifier(videoCaptureDeviceId): #for each frame

                #If there's a valid class
                if "classification" in res["result"].keys():
                    decision = ''
                    for label in labels: #Get the score of each label
                        score = res['result']['classification'][label]
                        if score > min_conf_threshold: #and check if it's > min_conf_threshold
                            decision = label #if it is, set it as a decision
                        print('%s: %.2f\t' % (label, score), end='')
                    print('You should: '+decision) #make the suggestion. "You should: " + "Go/Stop" etc.
                    if decision == 'Go': #If there's a go light
                        GPIO.output(motorpin, 1) #Turn on the motor to vibrate
                    elif decision == 'Stop' or decision == '': #If not
                        GPIO.output(motorpin, 0) #Turn off the motor
                
                #Print the bounding boxes
                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                    for bb in res["result"]["bounding_boxes"]:
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        img = cv2.rectangle(img, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)
                
                #Show camera, also listen for q to exit
                if (show_camera):
                    cv2.imshow('edgeimpulse', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break
                
                #Editing the 100 (ms) here changes your frame rate: 100 ms caps it at roughly 10 fps,
                #and this project doesn't need more than about 5 fps
                next_frame = now() + 100
                print(now()-rn) #elapsed time (ms) since the classifier started
                if (next_frame > now()):
                    time.sleep((next_frame - now()) / 1000)
        finally:
            if (runner):
                runner.stop()

if __name__ == "__main__":
    main(sys.argv[1:])
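To try the script, copy a trained .eim model onto the Pi (the Edge Impulse Linux CLI can download one, e.g. with edge-impulse-linux-runner --download) and pass its path as the first argument; the filename below is just a placeholder:

python3 CaneCam.py modelfile.eim

If more than one webcam is detected, re-run it with the camera port ID printed by get_webcams() as a second argument, e.g. python3 CaneCam.py modelfile.eim 0.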

Credits

Hudson Gould
