Welcome to Hackster!
Hackster is a community dedicated to learning hardware, from beginner to pro. Join us, it's free!
PradeepGaurav Yogeshwar
Published © MIT

Driver monitoring System using AMD Kria KR260 Robotics kit

Driver Monitoring System to detect and monitor driver drowsiness, distraction, yawning, fatigue, etc.

IntermediateWork in progress8 hours374
Driver monitoring System using AMD Kria KR260 Robotics kit

Things used in this project

Story

Read more

Schematics

05-driver-drowsiness-detection-application-algorithm-flowchart_v4_WpgClKjYSu.jpg

This is the flow-chart of how whole process should work.

Code

Drowsiness Detection

Python
It detects driver drowsiness using the eye openness ratio. If the eye stays closed for more than 2 seconds, it raises an alarm sound to wake the driver.
from scipy.spatial import distance
# from imutils.video import VideoStream
# import imutils
from imutils.video import FPS
from pygame import mixer
import cv2
import mediapipe as mp
from scipy.spatial import distance
from pygame import mixer
import cv2
import mediapipe as mp
import time

# MediaPipe drawing helpers and the Face Mesh solution used for landmark
# detection in the main loop below.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
# mp_holistic = mp.solutions.holistic
# mp_pose = mp.solutions.pose
mp_facemesh = mp.solutions.face_mesh
# Converts normalized (0..1) landmark coordinates to integer pixel
# coordinates. NOTE(review): presumably returns None for points that fall
# outside the frame (which would explain the try/except in get_ear/get_mar)
# — confirm against the MediaPipe drawing_utils source.
denormalize_coordinates = mp_drawing._normalized_to_pixel_coordinates

mixer.init()
# give the directory to the music file
# Alarm sound played when drowsiness/yawning persists (see main loop).
mixer.music.load("music.wav")


def distance(point_1, point_2):
    """Return the Euclidean (l2-norm) distance between two points."""
    squared_total = 0.0
    for a, b in zip(point_1, point_2):
        diff = a - b
        squared_total += diff * diff
    return squared_total ** 0.5


def get_ear(landmarks, refer_idxs, frame_width, frame_height, frame):
    """
    Calculate the Eye Aspect Ratio (EAR) for one eye.

    Args:
        landmarks: (list) Detected face-mesh landmarks (normalized x/y)
        refer_idxs: (list) Index positions of the chosen landmarks
                            in order P1, P2, P3, P4, P5, P6
        frame_width: (int) Width of captured frame
        frame_height: (int) Height of captured frame
        frame: BGR image that the landmark points are drawn onto (mutated)

    Returns:
        (ear, coords_points): EAR as a float and the list of pixel
        coordinates, or (0.0, None) if the landmarks could not be
        resolved to pixel positions.
    """
    try:
        # Convert the normalized landmark coordinates to pixel positions.
        coords_points = []
        for i in refer_idxs:
            lm = landmarks[i]
            coord = denormalize_coordinates(lm.x, lm.y,
                                            frame_width, frame_height)
            coords_points.append(coord)

        # Vertical eyelid distances (P2-P6, P3-P5) and the horizontal
        # eye-corner distance (P1-P4).
        P2_P6 = distance(coords_points[1], coords_points[5])
        P3_P5 = distance(coords_points[2], coords_points[4])
        P1_P4 = distance(coords_points[0], coords_points[3])

        # EAR falls toward 0 as the eye closes.
        ear = (P2_P6 + P3_P5) / (2.0 * P1_P4)

        # Draw the eye landmarks: red when below the drowsiness threshold,
        # green otherwise. The color depends only on `ear`, so choose it
        # once instead of re-testing inside the loop.
        color = (0, 0, 255) if ear < 0.22 else (0, 255, 0)
        for cord_point in coords_points:
            cv2.circle(frame, cord_point, 2, color, -1)

    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed. A failure here (e.g. a landmark that
        # could not be denormalized, making cv2.circle raise) degrades
        # gracefully to "eye not measurable".
        ear = 0.0
        coords_points = None

    return ear, coords_points


def get_mar(landmarks, refer_idxs, frame_width, frame_height, frame):
    """
    Calculate the Mouth Aspect Ratio (MAR).

    Args:
        landmarks: (list) Detected face-mesh landmarks (normalized x/y)
        refer_idxs: (list) Index positions of the chosen landmarks
                            in order Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8
        frame_width: (int) Width of captured frame
        frame_height: (int) Height of captured frame
        frame: BGR image that the landmark points are drawn onto (mutated)

    Returns:
        (mar, coords_points): MAR as a float and the list of pixel
        coordinates, or (0.0, None) if the landmarks could not be
        resolved to pixel positions.
    """
    try:
        # Convert the normalized landmark coordinates to pixel positions.
        coords_points = []
        for i in refer_idxs:
            lm = landmarks[i]
            coord = denormalize_coordinates(lm.x, lm.y,
                                            frame_width, frame_height)
            coords_points.append(coord)

        # Vertical lip distances (Q2-Q8, Q3-Q7, Q4-Q6) and the horizontal
        # mouth-corner distance (Q1-Q5).
        Q2_Q8 = distance(coords_points[1], coords_points[7])
        Q3_Q7 = distance(coords_points[2], coords_points[6])
        Q4_Q6 = distance(coords_points[3], coords_points[5])
        Q1_Q5 = distance(coords_points[0], coords_points[4])

        # MAR rises as the mouth opens (yawning).
        mar = (Q2_Q8 + Q3_Q7 + Q4_Q6) / (3.0 * Q1_Q5)

        # Draw the mouth landmarks: red when above the yawn threshold,
        # green otherwise. The color depends only on `mar`, so choose it
        # once instead of re-testing inside the loop.
        color = (0, 0, 255) if mar > 0.4 else (0, 255, 0)
        for cord_point in coords_points:
            cv2.circle(frame, cord_point, 2, color, -1)

    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any failure degrades gracefully to
        # "mouth not measurable".
        mar = 0.0
        coords_points = None

    return mar, coords_points


def calculate_avg_ear(landmarks, left_eye_idxs, right_eye_idxs, image_w, image_h, frame):
    """Average the Eye Aspect Ratio over both eyes.

    Delegates to get_ear() for each eye (which also draws the landmark
    points on `frame`) and returns the mean EAR together with the pixel
    coordinates of both eyes' landmarks.
    """
    left_ear, left_lm_coordinates = get_ear(
        landmarks, left_eye_idxs, image_w, image_h, frame)
    right_ear, right_lm_coordinates = get_ear(
        landmarks, right_eye_idxs, image_w, image_h, frame)

    avg_ear = 0.5 * (left_ear + right_ear)
    return avg_ear, (left_lm_coordinates, right_lm_coordinates)


# def eye_aspect_ratio(eye):
# 	A = distance.euclidean(eye[1], eye[5])
# 	B = distance.euclidean(eye[2], eye[4])
# 	C = distance.euclidean(eye[0], eye[3])
# 	ear = (A + B) / (2.0 * C)
# 	return ear
	
# The chosen 12 points:   P1,  P2,  P3,  P4,  P5,  P6
# MediaPipe face-mesh landmark indices for the two eye contours, listed
# in EAR order P1..P6 per eye.
chosen_left_eye_idxs  = [362, 385, 387, 263, 373, 380]
chosen_right_eye_idxs = [33,  160, 158, 133, 153, 144]

all_chosen_idxs = chosen_left_eye_idxs + chosen_right_eye_idxs

# The chosen 8 points 
#   on mouth: Q1, Q2, Q3, Q4,  Q5,  Q6,  Q7, Q8
mouth_idxs = [61, 41, 13, 271, 291, 402, 14, 178]

# EAR below this value is treated as "eye closed" in the main loop.
thresh = 0.22
# NOTE(review): frame_check is never used below — looks like a leftover
# from the commented-out dlib frame-count approach; confirm before removal.
frame_check = 48
# detect = dlib.get_frontal_face_detector()
# predict = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"]
# (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
# (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"]
# Demo video source; switch to the commented line to use the webcam.
cap=cv2.VideoCapture("Can You Watch This Without Yawning_.mp4")
# cap=cv2.VideoCapture(0)
# NOTE(review): flag is never read or written below — dead leftover state.
flag=0

# start the FPS counter
fps = FPS().start()

# Rest of the code...

# Initialize timer variables
# start_time marks when the drowsy/yawn condition first became true;
# end_time marks when the last alert fired.
start_time = None
end_time = None
alert_interval = 2  # Time in seconds for the alert to be active

# Rest of the code...

with mp_facemesh.FaceMesh(refine_landmarks=True) as face_mesh:
    while True:
        ret, frame=cap.read()
        # FIX: stop cleanly when the video ends or a frame cannot be read.
        # Previously `ret` was ignored, so cv2.flip(None, 1) crashed at EOF.
        if not ret or frame is None:
            break
        time.sleep(0.05)  # throttle playback of the recorded video
        # frame = imutils.resize(frame, width=450)
        # Mirror the frame and convert BGR -> RGB for MediaPipe.
        frame = cv2.cvtColor(cv2.flip(frame, 1), cv2.COLOR_BGR2RGB)
        imgH, imgW, _ = frame.shape
        # Running inference using static_image_mode
        results = face_mesh.process(frame).multi_face_landmarks
        # Convert back to BGR for OpenCV drawing and display.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # If detections are available.
        if results:
            for face_id, face_landmarks in enumerate(results):
                landmarks = face_landmarks.landmark
                EAR, _ = calculate_avg_ear(
                    landmarks,
                    chosen_left_eye_idxs,
                    chosen_right_eye_idxs,
                    imgW,
                    imgH,
                    frame
                )

                MAR, mar_position = get_mar(
                    landmarks,
                    mouth_idxs,
                    imgW,
                    imgH,
                    frame
                )

                cv2.putText(frame,"EAR:" + str(round(EAR,2)), (5, 40), 
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0),2)

                cv2.putText(frame, "MAR:" + str(round(MAR,2)), (imgW-200, 40),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 2)

            # Alert logic uses the EAR/MAR of the last face in `results`:
            # eye closed (low EAR) or mouth wide open (high MAR / yawning).
            if (EAR < thresh) or (MAR > 0.4):
                if start_time is None:
                    start_time = time.time()  # Start the timer

                # Re-arm the timer once alert_interval has passed since
                # the last alert so the alarm repeats while the condition
                # persists.
                if end_time is not None and (time.time() - end_time) >= alert_interval:
                    start_time = time.time()

                # Only alert after the condition has held for alert_interval.
                if (time.time() - start_time) >= alert_interval:
                    cv2.putText(frame, "****************ALERT!****************", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    cv2.putText(frame, "****************ALERT!****************", (10, 325),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    mixer.music.play()
                    end_time = time.time()  # Save the end time of the alert
            else:
                start_time = None  # Reset the timer
                end_time = None  # Reset the end time of the alert

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    cap.release() 

Credits

Pradeep
2 projects • 2 followers
My name is Pradeep. I have been working as an embedded systems engineer for the last 6+ years and have developed various electronics products.
Contact
Gaurav Yogeshwar
0 projects • 1 follower
Contact

Comments

Please log in or sign up to comment.