Frank Ononye

Parking Occupancy Detection on Edge Impulse

Build an object detection model on Edge Impulse that displays both available and unavailable parking spots.

Intermediate · Protip · 6 hours · 900

Things used in this project

Hardware components

Avnet MaaxBoard
×1
Avnet 5V/3A USB Type-C Power Supply
×1
32GB micro SD card
×1
Cable, USB to TTL Serial Converter 5V
×1
Ethernet Cable, 15 m
×1
MakerHawk TC66C USB Tester
×1
Mock Parking Lot
×1
Matchbox Cars
×1

Software apps and online services

Edge Impulse Studio
OpenCV

Story


Code

pas2ei.py

Python
Python script to convert Pascal VOC annotations exported from CVAT into an Edge Impulse JSON labels file
# This script converts Pascal VOC to an Edge Impulse JSON file
# To use it, change the path to the path where your annotation files in Pascal VOC format are
# Change resize_ratio to 1 if you're not doing any resizing.
# The file will output as "bounding_boxes.labels"

# To use it, place it in the same folder as your labels and run the edge impulse uploader:
# edge-impulse-uploader (the Edge Impulse CLI must be installed first)
# see doc here https://docs.edgeimpulse.com/docs/cli-uploader

import xmltodict
import json
import os
from collections import Counter
import time

# start = time.time()

# If resizing, add your own resize ratio here. If not, change to 1
resize_ratio = 1

# minimum width/height of annotation
min_size = 15
min_total_size = 300

def Pascal2JSON(input_path, output_path):
    attrDict = dict()
    attrDict["version"] = 1
    attrDict["type"] = "bounding-box-labels"
    images = dict()

    for filename in os.listdir(input_path):
        annotations = list()
        if filename.endswith(".xml"):
            filename_jpg = filename.replace(".xml",".jpg")

            print(filename)
            doc = xmltodict.parse(open(os.path.join(input_path, filename)).read())
            #print(doc['annotation']['filename'])


            n = 0
            if 'object' in doc['annotation']:
                #print("object found")
                #print(doc['annotation']['object'])

                for obj in doc['annotation']['object']:
                    #print("Object before conversion= %s", obj)
                    if obj == "name": # hacky code to break in case the xml file only has a single Object
                        n = 1
                        obj = doc['annotation']['object']
                    #print("Object after conversion = %s", obj)
                    annotation = dict()
                    #print(obj['name'])
                    annotation["label"] = str(obj['name']) #TypeError: string indices must be integers
                    xmin = int(obj["bndbox"]["xmin"]) / resize_ratio
                    ymin = int(obj["bndbox"]["ymin"]) / resize_ratio
                    width = (int(obj["bndbox"]["xmax"]) / resize_ratio) - xmin
                    height = (int(obj["bndbox"]["ymax"]) / resize_ratio) - ymin

                    #if height or width of any of the annotations is less than min_size, skip to next filename
                    #if(height<min_size) or (width < min_size):
                    if(height*width < min_total_size):
                        annotations = list()
                        too_small_image = os.path.join(output_path, filename_jpg)
                        print(too_small_image)
                        too_small_annotation = os.path.join(input_path, filename)
                        if os.path.exists(too_small_image):
                            os.remove(too_small_image)
                        if os.path.exists(too_small_annotation):
                            os.remove(too_small_annotation)
                        print("file {0} has annotations smaller than {1}px".format(filename, min_size))
                        break

                    annotation["x"] = round(xmin)
                    annotation["y"] = round(ymin)
                    annotation["width"] = round(width)
                    annotation["height"] = round(height)

                    annotations.append(annotation)
                    if n==1:
                        images[filename_jpg] = annotations
                        #print("n is one")
                        break

                    images[filename_jpg] = annotations



            #image[str(doc['annotation']['filename'])] = annotations
        #images.append(image)

    attrDict["boundingBoxes"] = images

    # Write the dictionary created from XML to a JSON string
    jsonString = json.dumps(attrDict)
    with open(os.path.join(output_path, "bounding_boxes.labels"), "w") as f:
        f.write(jsonString)

def CleanUpExtraImages(input_path, output_path):
    for filename in os.listdir(output_path):
        if filename.endswith(".jpg"):
            filename_xml = filename.replace(".jpg", ".xml")
            if not os.path.exists(os.path.join(output_path, filename_xml)):
                os.remove(os.path.join(output_path, filename))

input_path = "/home/fononye/smart_parking/Annotations/Webcam"
output_path = "/home/fononye/smart_parking/EI_Output"
Pascal2JSON(input_path, output_path)

#CleanUpExtraImages(input_path, output_path)

# end = time.time() - start
# print("time is {0}.format(end))

ei_benchmark_smart_parking_temp.py

Python
Python script used to measure object detection performance from a .eim file: the number of bounding boxes detected, their confidence scores, and the inference speed
#!/usr/bin/env python

import cv2
import os
import sys
import getopt
import numpy as np
from edge_impulse_linux.image import ImageImpulseRunner

runner = None
show_camera = True

def help():
    print('ei_benchmark_smart_parking_temp.py <.eim path> <test image path>')
    # If you rename this script, update the usage line above, keeping the format: <script>.py <.eim file path> <test image path>

def main(argv):
    try:
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) < 2:
        help()
        sys.exit(2)

    model = args[0]
    test_image_path = args[1]

    # Resolve both command-line arguments against dir_path (the board's home
    # directory here); absolute paths passed on the command line are used as-is.
    dir_path = "/home/root"
    modelfile = os.path.join(dir_path, model)
    test_image_path_absolute = os.path.join(dir_path, test_image_path)

    print('MODEL:', modelfile)
    print('TEST IMAGE:', test_image_path_absolute)

    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']

            img = cv2.imread(test_image_path_absolute)

            features = []

            EI_CLASSIFIER_INPUT_WIDTH = runner.dim[0]
            EI_CLASSIFIER_INPUT_HEIGHT = runner.dim[1]

            in_frame_cols = img.shape[1]
            in_frame_rows = img.shape[0]

            factor_w = EI_CLASSIFIER_INPUT_WIDTH / in_frame_cols
            factor_h = EI_CLASSIFIER_INPUT_HEIGHT / in_frame_rows

            largest_factor = factor_w if factor_w > factor_h else factor_h

            resize_size_w = int(largest_factor * in_frame_cols)
            resize_size_h = int(largest_factor * in_frame_rows)
            resize_size = (resize_size_w, resize_size_h)

            resized = cv2.resize(img, resize_size, interpolation=cv2.INTER_AREA)

            crop_x = int((resize_size_w - resize_size_h) / 2) if resize_size_w > resize_size_h else 0
            crop_y = int((resize_size_h - resize_size_w) / 2) if resize_size_h > resize_size_w else 0

            crop_region = (crop_x, crop_y, EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT)

            cropped = resized[crop_region[1]:crop_region[1]+crop_region[3], crop_region[0]:crop_region[0]+crop_region[2]]

            if runner.isGrayscale:
                cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
                pixels = np.array(cropped).flatten().tolist()

                for p in pixels:
                    features.append((p << 16) + (p << 8) + p)
            else:
                cv2.imwrite('test.jpeg', cropped)

                pixels = np.array(cropped).flatten().tolist()

                for ix in range(0, len(pixels), 3):
                    b = pixels[ix + 0]
                    g = pixels[ix + 1]
                    r = pixels[ix + 2]
                    features.append((r << 16) + (g << 8) + b)


            # Do the feature extraction or classification here based on your model

            res = runner.classify(features)

            if "classification" in res["result"].keys():
                print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                for label in labels:
                    score = res['result']['classification'][label]
                    print('%s: %.2f\t' % (label, score), end='')
                print('', flush=True)

            elif "bounding_boxes" in res["result"].keys():
                print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))

                # Resize the image for display
                resized_img = cv2.resize(img, (230, 230))  # Change dimensions as needed

                # Draw bounding boxes on the resized image
                detected_objects = {}  # Dictionary to store bounding boxes by label
                for bb in res["result"]["bounding_boxes"]:
                    label = bb['label']
                    value = bb['value']
                    x, y, w, h = bb['x'], bb['y'], bb['width'], bb['height']

                    # Check if a bounding box with the same label is already detected within the specified rectangular range
                    similar_bounding_boxes = [bbox for bbox in detected_objects.get(label, [])
                                              if abs(bbox['x'] - x) < 30  # 30 pixels horizontally
                                              and abs(bbox['y'] - y) < 70]  # 70 pixels vertically
                    if similar_bounding_boxes:
                        # Compare the confidence scores and keep the one with the higher score
                        max_precision_box = max(similar_bounding_boxes, key=lambda bbox: bbox['value'])
                        if value > max_precision_box['value']:
                            detected_objects[label].remove(max_precision_box)
                            detected_objects[label].append({'x': x, 'y': y, 'width': w, 'height': h, 'value': value})
                    else:
                        detected_objects.setdefault(label, []).append({'x': x, 'y': y, 'width': w, 'height': h, 'value': value})
                        
                    # Debugging: show the label being processed
                    print(f"Processing label: {label}, Score: {value:.2f}")

                # Draw bounding boxes on the resized image
                for label, bboxes in detected_objects.items():
                    for bb in bboxes:
                        if label == 'taken':
                            box_color = (0, 0, 255)  # Red color for 'taken' objects
                        elif label == 'free':
                            box_color = (0, 255, 0)  # Green color for 'free' objects
                        else:
                            box_color = (255, 0, 0)  # Blue color for other labels

                        # Draw bounding box
                        cv2.rectangle(resized_img, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), box_color, 2)
                        cv2.putText(resized_img, f'{label} ({bb["value"]:.2f})', (bb['x'], bb['y'] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)

                # Save the annotated image
                annotated_image_path = '/home/root/smart_annotated_image.jpg'
                cv2.imwrite(annotated_image_path, resized_img)

        finally:
            if runner:
                runner.stop()

if __name__ == "__main__":
    main(sys.argv[1:])
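
For a more stable timing figure than a single run, the sketch below (not part of the original script) re-runs classification on the same image several times and averages the reported DSP plus classification time. It reuses get_features_from_image from the SDK to handle resizing and cropping; the placeholder paths follow the same convention as the scripts above.

# benchmark_average.py - hypothetical helper, not part of the original project.
# Averages inference time over several classify() calls on one test image.
import cv2
import numpy as np
from edge_impulse_linux.image import ImageImpulseRunner

modelfile = '.eim path'          # placeholder: path to your .eim file
test_image = 'test image path'   # placeholder: path to a test image
runs = 10

with ImageImpulseRunner(modelfile) as runner:
    try:
        runner.init()
        img = cv2.cvtColor(cv2.imread(test_image), cv2.COLOR_BGR2RGB)
        # get_features_from_image resizes/crops the frame to the model input size
        features, _ = runner.get_features_from_image(img)

        timings = []
        for _ in range(runs):
            res = runner.classify(features)
            timings.append(res['timing']['dsp'] + res['timing']['classification'])

        print('Average inference time over %d runs: %.1f ms' % (runs, float(np.mean(timings))))
    finally:
        runner.stop()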

space_counter.py

Python
Python script to take an Edge Impulse model (preferably FOMO) and count the number of detected objects in a video file
import cv2
import os
import time
import sys
import numpy as np
from edge_impulse_linux.image import ImageImpulseRunner

# Constants: Add the eim file and video file paths respectively
modelfile = '.eim path'
videofile = 'video path'

runner = None
show_camera = True

# If you don't want to see a video preview, set this to False
if (sys.platform == 'linux' and not os.environ.get('DISPLAY')):
    show_camera = False
print('MODEL: ' + modelfile)

with ImageImpulseRunner(modelfile) as runner:
    try:
        model_info = runner.init()
        print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
        labels = model_info['model_parameters']['labels']
        vidcap = cv2.VideoCapture(videofile)
        sec = 0
        start_time = time.time()

        # Get frame width and height after initializing vidcap
        frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vidcap.get(cv2.CAP_PROP_FPS)

        def getFrame(sec):
            vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
            hasFrames, image = vidcap.read()
            if hasFrames:
                return image
            else:
                print('No more frames to read from', videofile)
                return None

        img = getFrame(sec)

        TOP_Y = 30
        NUM_COLS = 5
        COL_WIDTH = int(frame_width / NUM_COLS)
        DETECT_FACTOR = 1.5
        count_free = 0
        count_taken = 0

        while img is not None and img.size != 0:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            features, cropped = runner.get_features_from_image(img)
            img2 = cropped
            COL_WIDTH = int(np.shape(cropped)[0] / NUM_COLS)
            cv2.imwrite('debug.jpg', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))

            res = runner.classify(features)

            count_free_frame = 0
            count_taken_frame = 0

            if "bounding_boxes" in res["result"].keys():
                detected_objects = {}  # Using a dictionary to store bounding boxes by label
                print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                for bb in res["result"]["bounding_boxes"]:
                    print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                    label = bb['label']
                    if label == 'taken':
                        color = (0, 0, 255)  # Red color for 'taken' objects
                        count_taken_frame += 1
                    elif label == 'free':
                        color = (0, 255, 0)  # Green color for 'free' objects
                        count_free_frame += 1
                    else:
                        color = (255, 255, 255)  # White color for other labels

                    # Check if a bounding box with the same label is already detected within the specified rectangular range
                    similar_bounding_boxes = [bbox for bbox in detected_objects.get(label, [])
                                              if abs(bbox['x'] - bb['x']) < 30  # 30 pixels horizontally
                                              and abs(bbox['y'] - bb['y']) < 70]  # 70 pixels vertically
                    if similar_bounding_boxes:
                        # Compare the precision scores and keep the one with the higher score
                        max_precision_box = max(similar_bounding_boxes, key=lambda x: x['value'])
                        if bb['value'] > max_precision_box['value']:
                            detected_objects[label].remove(max_precision_box)
                            detected_objects[label].append(bb)
                    else:
                        detected_objects.setdefault(label, []).append(bb)

                # Draw bounding boxes
                for label, bboxes in detected_objects.items():
                    for bb in bboxes:
                        if label == 'taken':
                            color = (0, 0, 255)  # Red color for 'taken' objects
                        elif label == 'free':
                            color = (0, 255, 0)  # Green color for 'free' objects
                        else:
                            color = (255, 255, 255)  # White color for other labels

                        # Draw bounding box
                        img2 = cv2.rectangle(cropped, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), color, 1)

                # Update counts based on the detected objects
                count_free = len(detected_objects.get('free', []))
                count_taken = len(detected_objects.get('taken', []))
			
            # Display the processed frame
            if show_camera:
                im2 = cv2.resize(img2, dsize=(800, 800))
                cv2.putText(im2, f'Free: {count_free} Taken: {count_taken}', (15, 750), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('edgeimpulse', cv2.cvtColor(im2, cv2.COLOR_RGB2BGR))
                print(f'Free: {count_free}, Taken: {count_taken}')
                if cv2.waitKey(1) == ord('q'):
                    break

            sec = time.time() - start_time
            sec = round(sec, 2)
            img = getFrame(sec)

        # Print the counts from the last processed frame
        print(f'Final Free Spots: {count_free}, Final Taken Spots: {count_taken}')

    except Exception as e:
        print("Error occurred:", e)
    finally:
        if runner:
            runner.stop()
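
To run the same counting loop against a live USB camera rather than a saved video, a minimal variant could look like the sketch below (not part of the original scripts; the camera index is an assumption, and the duplicate-box merging step is omitted for brevity).

# live_counter.py - hypothetical variant, not part of the original project.
# Counts 'free' and 'taken' detections per frame from a USB camera.
import cv2
from edge_impulse_linux.image import ImageImpulseRunner

modelfile = '.eim path'  # placeholder, as in the scripts above

with ImageImpulseRunner(modelfile) as runner:
    cap = cv2.VideoCapture(0)  # assumed camera index
    try:
        runner.init()
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            features, cropped = runner.get_features_from_image(img)
            res = runner.classify(features)
            boxes = res['result'].get('bounding_boxes', [])
            free = sum(1 for bb in boxes if bb['label'] == 'free')
            taken = sum(1 for bb in boxes if bb['label'] == 'taken')
            print(f'Free: {free}, Taken: {taken}')
    except KeyboardInterrupt:
        pass
    finally:
        cap.release()
        runner.stop()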

Credits

Frank Ononye
