VPA
Published © GPL3+

System for collecting information about the behavior of pets

I wonder what our pets do during the day, especially when we are not at home. We get information from the video surveillance cameras connected to the Raspberry Pi.

Intermediate · Full instructions provided · Over 4 days

Things used in this project

Hardware components

Raspberry Pi 4 Model B
×1
DVR + 4 analog cameras
×1

Story

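The DVR aggregates the four analog cameras and exposes each channel as an RTSP stream, which the Raspberry Pi reads with OpenCV and passes through a MobileNet-SSD detector; frames that contain a cat or a dog are saved to disk and later sent to Telegram as a daily video. A minimal sketch of reading one frame from one channel is shown below, assuming the same DVR address, credentials, and URL format that are used in pets04.py (adjust them for your own recorder):

import cv2

# RTSP URL of channel 1, copied from pets04.py (assumption: your DVR uses the same format)
url = 'rtsp://admin:191066@192.168.0.109:554/mode=real&idc=1&ids=1'
cap = cv2.VideoCapture(url)
ret, frame = cap.read()
if ret:
    cv2.imwrite('test_cam1.jpg', frame)  # save the frame to verify the connection
    print('frame size:', frame.shape)
else:
    print('could not read a frame from the DVR')
cap.release()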

Code

pets04.py

Python
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import sys
import argparse
import imutils
import time
import cv2
from urllib.request import urlopen
from datetime import datetime
import os


# index of the camera currently being polled
tekcamera=1
# directory where frames with detected objects are saved
pathSaveImg="/home/petin/python3_prgs_1/OpenVino01"
# cameras[0] - VideoCapture objects, cameras[1] - RTSP URLs,
# cameras[2] - enabled/disabled flags, cameras[3] - window titles
cameras=[[0,0,0,0,0],
	[0,0,0,0,0],
	[False,True,False,True,False],
	["","Camera1","Camera2","Camera3","Camera4"]]
for i in range(1,5):
	cameras[1][i] = 'rtsp://admin:191066@192.168.0.109:554/mode=real&idc='+str(i)+'&ids=1'

fps = FPS().start()

# command-line arguments:
# --prototxt  path to the mobilenet-ssd.prototxt file
# --model     path to the mobilenet-ssd.caffemodel model file
# --show      show the camera frames in windows
# -c          minimum confidence for a detection to be accepted


ap = argparse.ArgumentParser()
ap.add_argument("--prototxt", required=True,
	help="path to Caffe 'deploy' prototxt file")
ap.add_argument("--model", required=True,
	help="path to Caffe pre-trained model")
ap.add_argument("--show", required=True, 
	help="Show cv2.imshow)")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
	help="minimum probability to filter weak detections")
args = vars(ap.parse_args())



# load the model

CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# run the network on the Intel Neural Compute Stick (Myriad)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
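# Note: DNN_TARGET_MYRIAD requires OpenCV built with the Intel Inference Engine
# (OpenVINO) backend and a Neural Compute Stick attached to the Raspberry Pi.
# Without the stick, inference can fall back to the CPU:
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)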

# initialize the video streams from the cameras
print("[INFO] starting video stream...")

for i in range(1,5):
	cameras[0][i] = cv2.VideoCapture(cameras[1][i])
print("OK")
time.sleep(5.0)

detected_objects = []
# main loop: poll the cameras in turn
while(1):
	tekcamera = tekcamera+1
	if tekcamera==5:
		tekcamera=1
		# write a timestamp to last.txt so we can check the script is still running
		logfile=open("last.txt","w+")
		ftime=datetime.now()
		str1=ftime.strftime("%d-%m-%Y %H:%M:%S\n")
		logfile.write(str1)
		logfile.close()

	# skip cameras that are disabled in the cameras table
	if cameras[2][tekcamera] == False:
		continue
	# read a frame from the current camera's stream
	ret, frame = cameras[0][tekcamera].read()
	# skip this pass if the stream did not return a frame
	if ret == False or frame is None:
		continue

	frame = imutils.resize(frame, width=800)
		
	# grab the frame dimensions and convert it to a blob
	(h, w) = frame.shape[:2]
	blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
		0.007843, (300, 300), 127.5)

	# pass the blob through the network and obtain the detections and
	# predictions
	net.setInput(blob)
	detections = net.forward()
    
	# process the detection results
	print("******************")
	for i in np.arange(0, detections.shape[2]):
		confidence = detections[0, 0, i, 2]
		idx = int(detections[0, 0, i, 1])
		#if confidence > args["confidence"] and idx==15 :   # people only
		if confidence > args["confidence"] and (idx==8 or idx==12) :   # cats (8) and dogs (12) only
			# save files
			ftime=datetime.now()
			if(os.path.exists(pathSaveImg+"/cam"+str(tekcamera)+"/"+ftime.strftime("%d-%m-%Y"))==False):
				os.mkdir(pathSaveImg+"/cam"+str(tekcamera)+"/"+ftime.strftime("%d-%m-%Y"))
			f=cv2.imwrite(pathSaveImg+"/cam"+str(tekcamera)+"/"+ftime.strftime("%d-%m-%Y")+"/_"+ftime.strftime("%H:%M:%S.%f")+".jpg", frame)
			print("write file = ",f)


			# extract the index of the class label from the
			# `detections`, then compute the (x, y)-coordinates of
			# the bounding box for the object
			#idx = int(detections[0, 0, i, 1])
			box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
			(startX, startY, endX, endY) = box.astype("int")

			# draw the prediction on the frame
			label = "{}: {:.2f}%".format(CLASSES[idx],
				confidence * 100)
			print(confidence,"  ",idx," - ",CLASSES[idx])
			detected_objects.append(label)
			cv2.rectangle(frame, (startX, startY), (endX, endY),
				COLORS[idx], 2)
			y = startY - 15 if startY - 15 > 15 else startY + 15
			cv2.putText(frame, label, (startX, y),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
			
	# show the output frame
	if args["show"] == "True":
		cv2.imshow(cameras[3][tekcamera], frame)

	key = cv2.waitKey(1) & 0xFF
	# exit on the 'q' key
	if key == ord("q"):
		break

	fps.update()


fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
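
The detector is started with the paths to the MobileNet-SSD files mentioned in the comments above, for example: python3 pets04.py --prototxt mobilenet-ssd.prototxt --model mobilenet-ssd.caffemodel --show True. The Telegram bot below serves daily clips named cam<N>-<DD-MM-YYYY>.mp4, while pets04.py only saves individual JPEG frames; the step that turns those frames into clips is not shown in this project. The following is only a rough sketch of how it could be done with cv2.VideoWriter, assuming the folder layout that pets04.py writes and an arbitrary frame rate:

import cv2
import glob
from datetime import datetime

# hypothetical helper: assemble today's saved frames into cam<N>-<DD-MM-YYYY>.mp4
pathSaveImg = "/home/petin/python3_prgs_1/OpenVino01"  # same folder as in pets04.py
day = datetime.now().strftime("%d-%m-%Y")

for cam in range(1, 5):
    files = sorted(glob.glob(pathSaveImg + "/cam" + str(cam) + "/" + day + "/*.jpg"))
    if not files:
        continue
    first = cv2.imread(files[0])
    h, w = first.shape[:2]
    # a few frames per second is enough for a daily summary clip (assumed value)
    out = cv2.VideoWriter("cam" + str(cam) + "-" + day + ".mp4",
                          cv2.VideoWriter_fourcc(*"mp4v"), 4, (w, h))
    for fname in files:
        img = cv2.imread(fname)
        if img is not None and img.shape[:2] == (h, w):
            out.write(img)
    out.release()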

pets_send_telegram.py

Python
import telebot
from telebot import types
from datetime import datetime, timedelta
from pathlib import Path

keyboard1 = [["","Камера 1","Камера 2","Камера 3","Камера 4","Не надо"],["","cam1","cam2","cam3","cam4","no"]]
bot = telebot.TeleBot('your-token');

@bot.message_handler(content_types=['text', 'document', 'audio'])
def get_text_messages(message):
	chatId=message.chat.id
	print(chatId)
	key_1=[0,0,0,0,0,0]
	ftime=datetime.now()
	print(ftime.day,"  ",ftime.hour)
	day=ftime.strftime("%d-%m-%Y")
	if message.text == "/help":
		bot.send_message(message.from_user.id, "Привет, здесь ты можешь посмотреть что делали мои котики за день 9-00 - 18-00. Набери /video")
	elif message.text == "/video":
		msg="здесь ты можешь посмотреть что делали мои котики за день 9-00 - 18-00 "+day
		bot.send_message(message.from_user.id, msg)
		keyboard = types.InlineKeyboardMarkup(); # inline keyboard
		for i in range(1,6):
			# create a button
			key_1[i] = types.InlineKeyboardButton(text=keyboard1[0][i], callback_data=keyboard1[1][i]);
			# add the button to the keyboard
			keyboard.add(key_1[i]);
		bot.send_message(message.from_user.id, "Выбери камеру", reply_markup=keyboard)   
	else:
		bot.send_message(message.from_user.id, "Я тебя не понимаю. Напиши /help.")

@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
	ftime=datetime.now()
	day=ftime.strftime("%d-%m-%Y")

	if call.data.find("cam") < 0:
		bot.send_message(call.message.chat.id, 'No');		
	else:
		if ftime.hour >= 18:
			bot.send_message(call.message.chat.id, 'Video from camera '+call.data.replace("cam","")+'. Wait...');
			videofile = Path(call.data+'-'+day+'.mp4')
			if videofile.is_file():
				video = open(call.data+'-'+day+'.mp4', 'rb')
				bot.send_video(call.message.chat.id, video)
				bot.send_message(call.message.chat.id, "Загружено")
			else:
				bot.send_message(call.message.chat.id, 'No video available!');
		else:
			bot.send_message(call.message.chat.id, "The current day's video is available only after 18:00; showing the previous day instead");
			ftime=datetime.now()- timedelta(days=1)
			dayold=ftime.strftime("%d-%m-%Y")
			videofile = Path(call.data+'-'+dayold+'.mp4')
			if videofile.is_file():
				video = open(call.data+'-'+dayold+'.mp4', 'rb')
				bot.send_video(call.message.chat.id, video)
				bot.send_message(call.message.chat.id, "Загружено")
			else:
				bot.send_message(call.message.chat.id, 'No video available!');
			


bot.polling(none_stop=True, interval=0)
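
To try the bot, create a bot with @BotFather in Telegram, put its token into telebot.TeleBot('your-token') above, place the cam<N>-<DD-MM-YYYY>.mp4 files in the script's working directory, then run the script and send /help or /video to the bot.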

Credits

VPA

6 projects • 28 followers
Author of more than 10 books on programming (web, Arduino, Raspberry Pi, IoT), developer, and teacher.
