import cv2
import numpy as np
import argparse
import time
def load_yolo():
    """Load the YOLOv3 network, COCO class names, colors, and output layers.

    Reads ``yolov3.weights``, ``yolov3.cfg``, and ``coco.names`` from the
    current working directory.

    Returns:
        tuple: (net, classes, colors, output_layers) where
            net: loaded ``cv2.dnn`` network,
            classes: list[str] of COCO class names,
            colors: ``(len(classes), 3)`` ndarray of random BGR colors,
            output_layers: list[str] of unconnected output layer names.
    """
    net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
    with open("coco.names", "r") as f:
        classes = [line.strip() for line in f]
    # getUnconnectedOutLayersNames() already yields the names; no need to
    # re-wrap it in an identity comprehension.
    output_layers = list(net.getUnconnectedOutLayersNames())
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    return net, classes, colors, output_layers
def detect_objects(img, net, outputLayers):
    """Run one forward pass of the network over an image.

    Args:
        img: BGR frame (ndarray) to run detection on.
        net: loaded ``cv2.dnn`` network.
        outputLayers: names of the output layers to evaluate.

    Returns:
        tuple: (blob, outputs) -- the preprocessed input blob and the raw
        per-layer network outputs.
    """
    # 1/255 pixel scaling, 416x416 input, BGR->RGB swap as YOLOv3 expects.
    input_blob = cv2.dnn.blobFromImage(
        img,
        scalefactor=0.00392,
        size=(416, 416),
        mean=(0, 0, 0),
        swapRB=True,
        crop=False,
    )
    net.setInput(input_blob)
    layer_outputs = net.forward(outputLayers)
    return input_blob, layer_outputs
def get_box_dimensions(outputs, height, width, conf_threshold=0.5):
    """Convert raw YOLO outputs into pixel-space boxes, confidences, and ids.

    Args:
        outputs: iterable of detection arrays; each row is
            ``[cx, cy, w, h, objectness, class_score_0, ...]`` with the
            geometry normalized to [0, 1].
        height: frame height in pixels.
        width: frame width in pixels.
        conf_threshold: minimum class score to keep a detection
            (default 0.5, matching the previous hard-coded value).

    Returns:
        tuple: (boxes, confs, class_ids) where boxes are ``[x, y, w, h]``
        in pixels with (x, y) the top-left corner.
    """
    boxes = []
    confs = []
    class_ids = []
    for output in outputs:
        for detect in output:
            # Class scores start at index 5; pick the best class.
            scores = detect[5:]
            class_id = int(np.argmax(scores))  # plain int, not numpy scalar
            conf = scores[class_id]
            if conf > conf_threshold:
                # Scale normalized center/size to pixels, then convert the
                # center-based box to a top-left-based one.
                center_x = int(detect[0] * width)
                center_y = int(detect[1] * height)
                w = int(detect[2] * width)
                h = int(detect[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confs.append(float(conf))
                class_ids.append(class_id)
    return boxes, confs, class_ids
def draw_labels(boxes, confs, colors, class_ids, classes, img):
    """Apply NMS, draw surviving boxes/labels, and report parking status.

    A fixed probe point (``poslist``) marks the monitored parking space:
    if any kept detection box covers it, the space is reported occupied.

    Args:
        boxes: candidate ``[x, y, w, h]`` boxes in pixels.
        confs: confidence score per box.
        colors: per-class colors (currently unused; kept for interface).
        class_ids: class index per box.
        classes: class-name list.
        img: frame to draw on (modified in place and shown via imshow).

    Returns:
        tuple: (img, status, indexes, spacecounter)
    """
    indexes = cv2.dnn.NMSBoxes(boxes, confs, 0.5, 0.4)
    # NMSBoxes may return shape (N,), (N, 1), or an empty tuple depending
    # on the OpenCV version; normalize to a flat sequence of kept indices.
    kept = np.array(indexes).flatten() if len(indexes) > 0 else []
    font = cv2.FONT_HERSHEY_PLAIN
    status = "vacant"
    poslist = [(800, 400)]
    spacecounter = 3
    for idx in kept:
        x, y, w, h = boxes[idx]
        label = str(classes[class_ids[idx]])
        # BUG FIX: original built the y-range as arange(y, w + h) -- wrong
        # upper bound -- and materialized every pixel of every box.  Test
        # point-in-rectangle containment directly instead (same half-open
        # bounds arange would have produced with the correct y + h).
        for px, py in poslist:
            if x <= px < x + w and y <= py < y + h:
                status = "occupied"
                # NOTE(review): 4 makes the overlay read "4/3" -- confirm
                # the intended count; preserved from the original.
                spacecounter = 4
        # Green when the space is occupied, red otherwise (status sticks
        # once any box covers the probe point, matching the original).
        color = (0, 255, 0) if status == "occupied" else (0, 0, 255)
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv2.putText(img, label, (x, y - 5), font, 2, color, 2)
    cv2.imshow("Image", img)
    return img, status, indexes, spacecounter
def start_video(video_path):
    """Run YOLO detection over a video, annotate frames, save ``output1.avi``.

    Press 'y' in the preview window to stop early; otherwise processing
    stops when the video ends.

    Args:
        video_path: path to the input video file.
    """
    model, classes, colors, output_layers = load_yolo()
    cap = cv2.VideoCapture(video_path)
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    frame_size = (
        int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    result = cv2.VideoWriter(
        'output1.avi',
        cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
        video_fps,
        frame_size,
    )
    try:
        while True:
            ret, frame = cap.read()
            # BUG FIX: the original ignored the read flag, so frame was
            # None at end-of-video and frame.shape raised AttributeError.
            if not ret:
                break
            height, width, _channels = frame.shape
            blob, outputs = detect_objects(frame, model, output_layers)
            boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
            final_img, status, indexes, spacecounter = draw_labels(
                boxes, confs, colors, class_ids, classes, frame)
            # Overlay the live status for the monitored space plus static
            # "vacant" labels at the other marked positions.
            cv2.putText(final_img, status, (800, 450), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            for pos in ((450, 450), (1150, 450), (200, 450), (1500, 450)):
                cv2.putText(final_img, "vacant", pos, cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            cv2.putText(final_img, f'Available parking:{spacecounter}/{3}', (100, 100),
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            result.write(final_img)
            cv2.imshow("Image", final_img)
            if cv2.waitKey(1) & 0xFF == ord('y'):
                break
    finally:
        # Release capture/writer and close windows even if a frame errors.
        cap.release()
        result.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Guarded entry point so importing this module does not start playback.
    # The stray trailing "Comments" line was removed: it was non-Python
    # residue that made the whole file a SyntaxError.
    start_video("istockphoto-1144667192-640_adpp_is.mp4")