Gustavs Andersons
Published © GPL3+

Empowering Disabled Gamers with Gesture Controls for Tetris

In a nutshell, this project lets disabled gamers control games with gestures or face movements.

Beginner · Full instructions provided · 3 hours · 149

Things used in this project

Hardware components

Webcam, Logitech® HD Pro
×1

Software apps and online services

Microsoft VS Code
With Python

Story


Code

Main.py

Python
# This is the main code. Before starting, you can optionally run CamSimFace.py or CamSimLips.py.
# You can find instructions on hackster.io.
# Remember to put both the .h5 and .txt files in the same folder!
# Before running, remember to replace the camera port and the paths (examples given).
# For this to run, install these libraries:
# pip install teachable-machine
# pip install opencv-python
# pip install pynput
 
from teachable_machine import TeachableMachine
from pynput.keyboard import Key, Controller
import cv2
import io
import time

countdown_seconds = 30 # If you need more time before the code starts, change this.

camera_port = 2 # !!! REPLACE this with your actual camera port. You can check it with the cameraPortTest.py code

# !!! REPLACE these paths with the absolute paths to your model and labels files
model_path = "C:\\Users\\Admin\\Desktop\\keras_model.h5"
labels_path = "C:\\Users\\Admin\\Desktop\\labels.txt"

keyboard = Controller()

# Countdown timer
for seconds_remaining in range(countdown_seconds, 0, -1):
    print(f"Starting in {seconds_remaining} second{'s' if seconds_remaining > 1 else ''}")
    time.sleep(1)

print("Countdown complete. Starting code!")

model = TeachableMachine(model_path=model_path, labels_file_path=labels_path)

# Open the camera stream
cap = cv2.VideoCapture(camera_port)

while True:
    ret, img = cap.read()
    if not ret:
        continue

    # Convert the image (numpy array) to bytes
    img_bytes = cv2.imencode('.jpg', img)[1].tobytes()
    
    # Classify the image
    result = model.classify_image(io.BytesIO(img_bytes))

    # Extract classification results
    class_index = result["class_index"]
    class_name = result["class_name"]
    class_confidence = result["class_confidence"]
    predictions = result["predictions"]

    # Print the prediction and confidence score
    print("Class Name:", class_name)
    print("Class Index:", class_index)
    print("Class Confidence:", class_confidence)
    print("Predictions:", predictions)

    # Show the image in a window
    cv2.imshow("Webcam Image", img)

    if class_index == 0:
        print("Normal")
    elif class_index == 1:
        print("Left")
        keyboard.press(Key.left)
        keyboard.release(Key.left)
    elif class_index == 2:
        print("Right")
        keyboard.press(Key.right)
        keyboard.release(Key.right)
    elif class_index == 3:
        print("Rotate")
        keyboard.press(Key.up)
        keyboard.release(Key.up)

    time.sleep(0.5)

    # Check the OpenCV window for a keypress
    keyboard_input = cv2.waitKey(1)

    # 27 is the ASCII code for the Esc key
    if keyboard_input == 27:
        break

cap.release()
cv2.destroyAllWindows()
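A note on reliability: the loop above presses a key whenever a class wins, even when the model is unsure. If you get spurious keypresses, one option is to gate the keypress on class_confidence, which the result dictionary already provides. Below is a minimal sketch of that idea; maybe_press is a hypothetical helper, and the 0.8 threshold is an assumption you should tune for your own model.

from pynput.keyboard import Key, Controller

keyboard = Controller()
CONFIDENCE_THRESHOLD = 0.8  # assumed value; tune it for your own model

def maybe_press(class_index, class_confidence):
    # Hypothetical helper: press an arrow key only when the model
    # is confident enough, otherwise do nothing.
    if class_confidence < CONFIDENCE_THRESHOLD:
        return
    if class_index == 1:
        keyboard.press(Key.left)
        keyboard.release(Key.left)
    elif class_index == 2:
        keyboard.press(Key.right)
        keyboard.release(Key.right)
    elif class_index == 3:
        keyboard.press(Key.up)
        keyboard.release(Key.up)

You could call maybe_press(class_index, class_confidence) in place of the if/elif chain in the main loop.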

CamSimFace.py

Python
# This script creates a virtual camera live stream that shows only the face, used to train the model.
# Before running it, install the necessary packages: pip install opencv-python pyvirtualcam
# You also need OBS installed, with its Virtual Camera set up.

import cv2
import pyvirtualcam

# Load the pre-trained face detection model
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Open the webcam once, outside the loop
cap = cv2.VideoCapture(0)  # Use 0 for the default webcam, or change to the appropriate camera index

with pyvirtualcam.Camera(width=640, height=480, fps=30) as cam:
    while True:
        # Capture a frame from the webcam
        ret, frame = cap.read()

        if not ret:
            continue

        # Convert the frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect faces in the grayscale frame
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        # If faces are detected, process and send the frame to the virtual camera
        if len(faces) > 0:
            x, y, w, h = faces[0]
            face = frame[y:y+h, x:x+w]

            # Convert the processed frame to grayscale
            gray_face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)

            # Convert the grayscale image back to BGR format
            gray_face_bgr = cv2.cvtColor(gray_face, cv2.COLOR_GRAY2BGR)

            # Resize the processed frame to match the virtual camera dimensions
            resized_face = cv2.resize(gray_face_bgr, (cam.width, cam.height))

            # Send the frame to the virtual camera
            cam.send(resized_face)

# Release the webcam when the stream ends
cap.release()
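detectMultiScale can return several boxes, and faces[0] is simply the first match, which is not necessarily the player's face. A common heuristic is to keep the largest detection by bounding-box area instead. A minimal sketch; largest_detection is a hypothetical helper name:

def largest_detection(boxes):
    # Return the (x, y, w, h) box with the largest area, or None.
    if len(boxes) == 0:
        return None
    return max(boxes, key=lambda box: box[2] * box[3])

You could then replace faces[0] above with largest_detection(faces), checking for None before cropping.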

CamSimLips.py

Python
# This script creates a virtual camera live stream that shows only the lips, used to train the model.
# It needs the same setup as CamSimFace.py (pip install opencv-python pyvirtualcam, plus OBS with its Virtual Camera).

import cv2
import pyvirtualcam

# !!! REPLACE this path with the absolute path to the mouth cascade file
lip_cascade = cv2.CascadeClassifier('Test\\haarcascade_mcs_mouth.xml')

# Open the webcam once, outside the loop
cap = cv2.VideoCapture(0)  # Use 0 for the default webcam, or change to the appropriate camera index

with pyvirtualcam.Camera(width=640, height=480, fps=30) as cam:
    while True:
        # Capture a frame from the webcam
        ret, frame = cap.read()

        if not ret:
            continue

        # Convert the frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect lips in the grayscale frame
        lips = lip_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        # If lips are detected, process and send the frame to the virtual camera
        if len(lips) > 0:
            for (x, y, w, h) in lips:
                lip = frame[y:y+h, x:x+w]

                # Convert the processed frame to grayscale
                gray_lip = cv2.cvtColor(lip, cv2.COLOR_BGR2GRAY)

                # Convert the grayscale image back to BGR format
                gray_lip_bgr = cv2.cvtColor(gray_lip, cv2.COLOR_GRAY2BGR)

                # Resize the processed frame to match the virtual camera dimensions
                resized_lip = cv2.resize(gray_lip_bgr, (cam.width, cam.height))

                # Send the frame to the virtual camera
                cam.send(resized_lip)

# Release the webcam when the stream ends
cap.release()
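The mouth cascade is prone to false positives (it often fires on eyes or nostrils as well). A common way to cut these down is to detect the face first and then search for the mouth only in the lower half of the face box. A minimal sketch of that idea, assuming the same cascade paths as above; find_mouth is a hypothetical helper:

import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
lip_cascade = cv2.CascadeClassifier('Test\\haarcascade_mcs_mouth.xml')

def find_mouth(frame):
    # Detect a face, then look for the mouth only in the lower half
    # of the face box. Returns the mouth crop from frame, or None.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if len(faces) == 0:
        return None
    fx, fy, fw, fh = faces[0]
    half_y = fy + fh // 2
    lower_half = gray[half_y:fy + fh, fx:fx + fw]
    mouths = lip_cascade.detectMultiScale(lower_half, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if len(mouths) == 0:
        return None
    mx, my, mw, mh = mouths[0]
    # Map the mouth box back to full-frame coordinates
    return frame[half_y + my:half_y + my + mh, fx + mx:fx + mx + mw]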

cameraPortTest.py

Python
import cv2

# Get the list of available camera ports
available_cameras = []
for i in range(10):
    cap = cv2.VideoCapture(i)
    if cap.isOpened():
        available_cameras.append(i)
    cap.release()

# Preview each available camera in turn (press 'q' to move to the next one)
for port in available_cameras:
    cap = cv2.VideoCapture(port)
    window_title = f"Camera Port {port}"

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Resize the frame to a smaller size
        frame = cv2.resize(frame, (320, 240))

        # Display the frame in a window with the camera port title
        cv2.imshow(window_title, frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyWindow(window_title)

cv2.destroyAllWindows()

My GitHub page

It has all the code needed.

Credits

Gustavs Andersons

7 projects • 4 followers
I like coding 🖥️, drone photography + FPV 🛫, soldering 🔋, 3D modeling 🖱️, animations & video editing + event lights and audio 🎚️!
