Whitney Knitter
Published © GPL3+

Lego Land Rover Defender Robot with Kria KR260

This project shows how I used the Kria KR260 & Edge Impulse to transform the Land Rover Defender Lego Technic kit into an autonomous robot.

Advanced · Full instructions provided · 10 hours · 5,134 views
Lego Land Rover Defender Robot with Kria KR260

Things used in this project

Hardware components

Kria™ KR260 Robotics Starter Kit
AMD Kria™ KR260 Robotics Starter Kit
×1
LEGO Land Rover Defender Technic 42110
×1
Pmod HB3
Digilent Pmod HB3
×2
Adafruit N20 DC Motor with Magnetic Encoder - 6V with 1:100 Gear Ratio
×1
OSEPP High Torque Electric Motor (6V)
×1
Technic Parts Spare Parts Kit
×1
Adafruit Compact Switching Power Supply - Selectable Output 3-12VDC
×1
SIM&NAT 1 Female to 2 Male 5.5mm x 2.1mm DC Power Supply Splitter Cord
×1
SIM&NAT DC Female 5.5mm x 2.1mm Power Jack Adapter Plug Pigtail Cable
×2
Creality Ender 3 Pro Synchronous Wheel
×2
3D Printer GT2 Belt 2mm Pitch 6mm Wide for Creality Ender 3 Pro Ender
×1

Software apps and online services

Vivado Design Suite
AMD Vivado Design Suite
Edge Impulse Studio
Edge Impulse Studio
Ubuntu Desktop for AMD-Xilinx

Story

Read more

Code

pumpkin_avoidance.py

Python
Based on classify.py from Edge Impulse Python SDK examples - this is the script for detecting the obstacle and steering around it
#!/usr/bin/env python

import os
import cv2
import time
import signal
import sys, getopt
from edge_impulse_linux.image import ImageImpulseRunner

# Steer motor - Pmod 1
# MOTOR1_DIR 318
# MOTOR1_EN  319
# MOTOR1_SA  320
# MOTOR1_SB  321

# Drive motor - Pmod 2
# MOTOR1_DIR 310
# MOTOR1_EN  311

# Wheel-direction states reported and consumed by the steering helpers below.
left = 1
straight = 0
right = 2

def half_turn_left(wheel_dir_current):
    """Pulse the steering motor for a half turn to the left.

    A half turn moves the wheels one state leftward: straight -> left, or
    right -> straight. If the wheels are already fully left, no GPIO
    activity happens and the current state is returned unchanged.

    Args:
        wheel_dir_current: current wheel state (left/straight/right).

    Returns:
        The new wheel state after the (attempted) turn.
    """
    if wheel_dir_current == straight:
        wheel_dir = left
    elif wheel_dir_current == right:
        wheel_dir = straight
    else:
        print('Wheels cannot turn any further left...')
        return wheel_dir_current

    # Both reachable branches ran the identical GPIO sequence in the
    # original, so it is factored out here: set the direction pin high
    # (left), then pulse the enable line for 0.3 s to nudge the steering
    # rack one position over.
    os.system('echo 1 > /sys/class/gpio/gpio318/value')  # direction = left
    time.sleep(0.1)
    os.system('echo 1 > /sys/class/gpio/gpio319/value')  # enable motor
    time.sleep(0.3)
    os.system('echo 0 > /sys/class/gpio/gpio319/value')  # disable motor
    time.sleep(0.1)

    return wheel_dir

def full_turn_left(wheel_dir_current):
    """Swing the wheels from fully right to fully left in one motion.

    Only valid when the wheels are currently turned right; any other state
    is returned unchanged after printing a hint to use a half turn instead.
    """
    if wheel_dir_current != right:
        print('Wheels cannot make full turn left, try half turn left...')
        return wheel_dir_current

    # Direction pin high = left; hold the enable line twice as long as a
    # half turn (0.6 s) to sweep across both steering positions.
    os.system('echo 1 > /sys/class/gpio/gpio318/value')  # turn left
    time.sleep(0.1)
    os.system('echo 1 > /sys/class/gpio/gpio319/value')
    time.sleep(0.6)
    os.system('echo 0 > /sys/class/gpio/gpio319/value')
    time.sleep(0.1)
    return left

def half_turn_right(wheel_dir_current):
    """Pulse the steering motor for a half turn to the right.

    A half turn moves the wheels one state rightward: straight -> right, or
    left -> straight. If the wheels are already fully right, no GPIO
    activity happens and the current state is returned unchanged.

    Args:
        wheel_dir_current: current wheel state (left/straight/right).

    Returns:
        The new wheel state after the (attempted) turn.
    """
    if wheel_dir_current == straight:
        wheel_dir = right
    elif wheel_dir_current == left:
        wheel_dir = straight
    else:
        print('Wheels cannot turn any further right...')
        return wheel_dir_current

    # Both reachable branches ran the identical GPIO sequence in the
    # original, so it is factored out here: set the direction pin low
    # (right), then pulse the enable line for 0.3 s to nudge the steering
    # rack one position over.
    os.system('echo 0 > /sys/class/gpio/gpio318/value')  # direction = right
    time.sleep(0.1)
    os.system('echo 1 > /sys/class/gpio/gpio319/value')  # enable motor
    time.sleep(0.3)
    os.system('echo 0 > /sys/class/gpio/gpio319/value')  # disable motor
    time.sleep(0.1)

    return wheel_dir

def full_turn_right(wheel_dir_current):
    """Swing the wheels from fully left to fully right in one motion.

    Only valid when the wheels are currently turned left; any other state
    is returned unchanged after printing a hint to use a half turn instead.
    """
    if wheel_dir_current != left:
        print('Wheels cannot make full turn right, try half turn right...')
        return wheel_dir_current

    # Direction pin low = right; hold the enable line twice as long as a
    # half turn (0.6 s) to sweep across both steering positions.
    os.system('echo 0 > /sys/class/gpio/gpio318/value')  # turn right
    time.sleep(0.1)
    os.system('echo 1 > /sys/class/gpio/gpio319/value')
    time.sleep(0.6)
    os.system('echo 0 > /sys/class/gpio/gpio319/value')
    time.sleep(0.1)
    return right

# Module-level handle to the Edge Impulse runner so the SIGINT handler can
# stop it cleanly; main() assigns its own runner inside a with-block.
runner = None

# Show the OpenCV preview window only when a display is actually available.
# Set to False to disable the camera preview entirely.
show_camera = True
if sys.platform == 'linux' and not os.environ.get('DISPLAY'):
    show_camera = False

def now():
    """Return the current wall-clock time in whole milliseconds."""
    return round(1000 * time.time())

def get_webcams():
    """Probe video ports 0-4 and return the IDs with a working camera.

    A port counts as working only if it both opens and delivers a frame.
    """
    found = []
    for port_id in range(5):
        print("Looking for a camera in port %s:" % port_id)
        cap = cv2.VideoCapture(port_id)
        if cap.isOpened():
            ok, _ = cap.read()
            if ok:
                backend = cap.getBackendName()
                width = cap.get(3)
                height = cap.get(4)
                print("Camera %s (%s x %s) found in port %s " % (backend, height, width, port_id))
                found.append(port_id)
            cap.release()
    return found

def sigint_handler(sig, frame):
    """Ctrl-C handler: stop the Edge Impulse runner, then exit cleanly."""
    print('Interrupted')
    active = runner
    if active:
        active.stop()
    sys.exit(0)

signal.signal(signal.SIGINT, sigint_handler)

def help():
    """Print command-line usage.

    NOTE: intentionally keeps the original Edge Impulse example's name even
    though it shadows the builtin help().
    """
    print('python classify.py <path_to_model.eim> <Camera port ID, only required when more than 1 camera is present>')

def main(argv):
    """Run the object-detection model and steer the rover around pumpkins.

    Args:
        argv: command-line arguments — path to the .eim model file, plus an
            optional camera port ID (needed when several cameras exist).

    Exits with status 2 on bad arguments; raises Exception when no usable
    camera can be found or initialized.
    """
    try:
        # BUGFIX: getopt long-option names are given WITHOUT the leading
        # dashes. The original ["--help"] made `--help` raise GetoptError
        # and exit(2) instead of printing usage and exiting cleanly.
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) == 0:
        help()
        sys.exit(2)

    # set motor direction to forward, can add more decision logic later where appropriate
    os.system('echo 1 > /sys/class/gpio/gpio310/value')

    # disable motor until ML model is running
    os.system('echo 0 > /sys/class/gpio/gpio311/value')

    model = args[0]

    # Resolve the model path relative to this script's own directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    # Wheels are assumed to start in the straight-ahead position.
    defender_dir = straight

    # NOTE(review): this 'runner' is local to main(); the module-level
    # sigint handler checks the *global* runner, which stays None — confirm
    # Ctrl-C shutdown behaves as intended.
    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']
            if len(args) >= 2:
                videoCaptureDeviceId = int(args[1])
            else:
                port_ids = get_webcams()
                if len(port_ids) == 0:
                    raise Exception('Cannot find any webcams')
                if len(args) <= 1 and len(port_ids) > 1:
                    raise Exception("Multiple cameras found. Add the camera port ID as a second argument to use to this script")
                videoCaptureDeviceId = int(port_ids[0])

            # Open the camera once up front to confirm it works, then
            # release it so the classifier can claim it.
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) in port %s selected." % (backendName, h, w, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")

            next_frame = 0  # limit to ~10 fps here

            for res, img in runner.classifier(videoCaptureDeviceId):
                # Throttle the loop so frames are processed at most ~10/s.
                if next_frame > now():
                    time.sleep((next_frame - now()) / 1000)

                if "classification" in res["result"].keys():
                    print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                    for label in labels:
                        score = res['result']['classification'][label]
                        print('%s: %.2f\t' % (label, score), end='')
                    print('', flush=True)

                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))

                    # Nothing detected: keep (or re-enable) the drive motor.
                    if len(res["result"]["bounding_boxes"]) == 0:
                        os.system('echo 1 > /sys/class/gpio/gpio311/value')

                    for bb in res["result"]["bounding_boxes"]:
                        # Object found: report which object and where in the frame.
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        img = cv2.rectangle(img, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)
                        # Take an action for a given object (motor control code here).
                        if bb['label'] == "pumpkin":
                            # Steer away from the pumpkin: turn right when it
                            # sits in the right half of the frame (x >= 40),
                            # otherwise turn left. A full turn is needed when
                            # the wheels are already turned the other way.
                            if bb['x'] >= 40:
                                if defender_dir == straight:
                                    defender_dir = half_turn_right(defender_dir)
                                else:
                                    defender_dir = full_turn_right(defender_dir)
                            else:
                                if defender_dir == straight:
                                    defender_dir = half_turn_left(defender_dir)
                                else:
                                    defender_dir = full_turn_left(defender_dir)
                        else:
                            # Any other object: keep moving.
                            os.system('echo 1 > /sys/class/gpio/gpio311/value')

                if show_camera:
                    cv2.imshow('edgeimpulse', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break

                next_frame = now() + 100
        finally:
            if runner:
                runner.stop()
                # disable motor when ML model stops
                os.system('echo 0 > /sys/class/gpio/gpio311/value')
                os.system('echo 0 > /sys/class/gpio/gpio310/value')

if __name__ == "__main__":
    main(sys.argv[1:])

pumpkin_halt.py

Python
Based on classify.py from Edge Impulse Python SDK examples - this is the script for detecting the obstacle and stopping when it's seen.
#!/usr/bin/env python

import os
import cv2
import time
import signal
import sys, getopt
from edge_impulse_linux.image import ImageImpulseRunner

# Module-level handle to the Edge Impulse runner so the SIGINT handler can
# stop it cleanly; main() assigns its own runner inside a with-block.
runner = None

# Show the OpenCV preview window only when a display is actually available.
# Set to False to disable the camera preview entirely.
show_camera = True
if sys.platform == 'linux' and not os.environ.get('DISPLAY'):
    show_camera = False

def now():
    """Return the current wall-clock time in whole milliseconds."""
    return round(1000 * time.time())

def get_webcams():
    """Probe video ports 0-4 and return the IDs with a working camera.

    A port counts as working only if it both opens and delivers a frame.
    """
    found = []
    for port_id in range(5):
        print("Looking for a camera in port %s:" % port_id)
        cap = cv2.VideoCapture(port_id)
        if cap.isOpened():
            ok, _ = cap.read()
            if ok:
                backend = cap.getBackendName()
                width = cap.get(3)
                height = cap.get(4)
                print("Camera %s (%s x %s) found in port %s " % (backend, height, width, port_id))
                found.append(port_id)
            cap.release()
    return found

def sigint_handler(sig, frame):
    """Ctrl-C handler: stop the Edge Impulse runner, then exit cleanly."""
    print('Interrupted')
    active = runner
    if active:
        active.stop()
    sys.exit(0)

signal.signal(signal.SIGINT, sigint_handler)

def help():
    """Print command-line usage.

    NOTE: intentionally keeps the original Edge Impulse example's name even
    though it shadows the builtin help().
    """
    print('python classify.py <path_to_model.eim> <Camera port ID, only required when more than 1 camera is present>')

def main(argv):
    """Detect pumpkins with the Edge Impulse model and stop when one is seen.

    Args:
        argv: command-line arguments — path to the .eim model file, plus an
            optional camera port ID (needed when several cameras exist).

    Exits with status 2 on bad arguments; raises Exception when no usable
    camera can be found or initialized.
    """
    try:
        # BUGFIX: getopt long-option names are given WITHOUT the leading
        # dashes. The original ["--help"] made `--help` raise GetoptError
        # and exit(2) instead of printing usage and exiting cleanly.
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) == 0:
        help()
        sys.exit(2)

    # set motor direction to forward, can add more decision logic later where appropriate
    os.system('echo 1 > /sys/class/gpio/gpio310/value')

    # disable motor until ML model is running
    os.system('echo 0 > /sys/class/gpio/gpio311/value')

    model = args[0]

    # Resolve the model path relative to this script's own directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    # NOTE(review): this 'runner' is local to main(); the module-level
    # sigint handler checks the *global* runner, which stays None — confirm
    # Ctrl-C shutdown behaves as intended.
    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']
            if len(args) >= 2:
                videoCaptureDeviceId = int(args[1])
            else:
                port_ids = get_webcams()
                if len(port_ids) == 0:
                    raise Exception('Cannot find any webcams')
                if len(args) <= 1 and len(port_ids) > 1:
                    raise Exception("Multiple cameras found. Add the camera port ID as a second argument to use to this script")
                videoCaptureDeviceId = int(port_ids[0])

            # Open the camera once up front to confirm it works, then
            # release it so the classifier can claim it.
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) in port %s selected." % (backendName, h, w, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")

            next_frame = 0  # limit to ~10 fps here

            for res, img in runner.classifier(videoCaptureDeviceId):
                # Throttle the loop so frames are processed at most ~10/s.
                if next_frame > now():
                    time.sleep((next_frame - now()) / 1000)

                if "classification" in res["result"].keys():
                    print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                    for label in labels:
                        score = res['result']['classification'][label]
                        print('%s: %.2f\t' % (label, score), end='')
                    print('', flush=True)

                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))

                    if len(res["result"]["bounding_boxes"]) == 0:
                        # BUGFIX: the original called GPIO.output(311, GPIO.HIGH)
                        # but no GPIO module is imported anywhere in this script,
                        # so reaching this line raised NameError. Use the same
                        # sysfs write as the rest of the motor-control code to
                        # re-enable the drive motor when nothing is detected.
                        os.system('echo 1 > /sys/class/gpio/gpio311/value')

                    for bb in res["result"]["bounding_boxes"]:
                        # Object found: report which object and where in the frame.
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        img = cv2.rectangle(img, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)
                        # Take an action for a given object (motor control code here).
                        if bb['label'] == "pumpkin":
                            # stop if pumpkin is seen
                            os.system('echo 0 > /sys/class/gpio/gpio311/value')
                        else:
                            # otherwise keep moving
                            os.system('echo 1 > /sys/class/gpio/gpio311/value')

                if show_camera:
                    cv2.imshow('edgeimpulse', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break

                next_frame = now() + 100
        finally:
            if runner:
                runner.stop()
                os.system('echo 0 > /sys/class/gpio/gpio311/value')  # disable motor when ML model stops
                os.system('echo 0 > /sys/class/gpio/gpio310/value')

if __name__ == "__main__":
    main(sys.argv[1:])

Credits

Whitney Knitter

Whitney Knitter

169 projects • 1699 followers
All thoughts/opinions are my own and do not reflect those of any company/entity I currently/previously associate with.

Comments