Develop an application to detect and track moving cars.
First, import the video using the OpenCV library. Once the video has opened successfully, get the video width and height. This information can be used to focus only on the part of the image that is of interest.
In order to detect motion, we follow these steps:
Baseline code reference:
Install the required Python libraries
pip install pyttsx3
pip install pywin32
pip install numpy
pip install opencv-python
These libraries apply to both exercise 1.1 and 1.2
pip install pyttsx3
Requirement already satisfied: pyttsx3 in c:\users\sjhen\anaconda3\lib\site-packages (2.90) Requirement already satisfied: pypiwin32 in c:\users\sjhen\anaconda3\lib\site-packages (from pyttsx3) (223) Requirement already satisfied: pywin32 in c:\users\sjhen\anaconda3\lib\site-packages (from pyttsx3) (228) Requirement already satisfied: comtypes in c:\users\sjhen\anaconda3\lib\site-packages (from pyttsx3) (1.1.10) Note: you may need to restart the kernel to use updated packages.
pip install pywin32
Requirement already satisfied: pywin32 in c:\users\sjhen\anaconda3\lib\site-packages (228) Note: you may need to restart the kernel to use updated packages.
pip install numpy
Requirement already satisfied: numpy in c:\users\sjhen\anaconda3\lib\site-packages (1.20.3) Note: you may need to restart the kernel to use updated packages.
pip install opencv-python
Requirement already satisfied: opencv-python in c:\users\sjhen\anaconda3\lib\site-packages (4.5.5.62) Requirement already satisfied: numpy>=1.19.3 in c:\users\sjhen\anaconda3\lib\site-packages (from opencv-python) (1.20.3) Note: you may need to restart the kernel to use updated packages.
As we can see above, all the required packages are installed.
Let's start with the code.
# let’s import the libraries
# For playing the audio, we will be using “pyttsx3” python library to convert text to speech
import cv2
import numpy as np
Let's create a function to run and analyse the video clips; this way we can just call the function with the desired video.
def motionTrackVideo(video, min_contour_area=5000, diff_threshold=30):
    """Detect and track moving cars in a video by frame differencing.

    The first frame is kept as a static baseline. Every later frame is
    grayscaled and blurred, subtracted from the baseline, thresholded to
    a binary mask, dilated, and the resulting contours are drawn as
    green bounding boxes on the original frame. Only contours whose top
    edge lies below the horizontal cutoff (roughly the lower half of the
    image, i.e. the main street) and whose area exceeds
    ``min_contour_area`` are drawn. Press 'q' to stop playback early.

    Parameters:
        video: an already-constructed cv2.VideoCapture object.
        min_contour_area: minimum contour area in pixels to count as a
            moving car; smaller contours are treated as noise (default
            5000, the value previously hard-coded).
        diff_threshold: pixel-difference threshold used to binarise the
            delta frame (default 30, the value previously hard-coded).

    The capture is released and all OpenCV windows are destroyed on
    exit, whether the video ends, the user quits, or an error occurs.
    """
    initial_frame = None
    # Bail out early if the capture could not be opened.
    if not video.isOpened():
        print("Error opening video file")
        return
    # Get video dimensions; get(3)/get(4) are CAP_PROP_FRAME_WIDTH and
    # CAP_PROP_FRAME_HEIGHT (returned as floats, hence the int()).
    video_width = int(video.get(3))
    video_height = int(video.get(4))
    # Horizontal line (in image coordinates) below which we look for
    # motion: roughly the lower half of the frame, covering the street.
    video_cutoff_point_H = int(video_height / 2) + 30
    try:
        # Keep reading frames until the video ends or 'q' is pressed.
        while video.isOpened():
            check, frame = video.read()
            # read() returns False when no frame is available; handle it
            # explicitly instead of relying on a downstream exception
            # from cvtColor receiving a None frame.
            if not check:
                print("End of video reached")
                break
            # Gray conversion and noise reduction (smoothing).
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            blur_frame = cv2.GaussianBlur(gray_frame, (25, 25), 0)
            # The first captured frame is the baseline image.
            if initial_frame is None:
                initial_frame = blur_frame
                continue
            # Absolute difference between the baseline and the new frame.
            delta_frame = cv2.absdiff(initial_frame, blur_frame)
            # Binarise the delta: pixels differing by more than
            # diff_threshold become white (255), the rest black (0).
            threshold_frame = cv2.threshold(
                delta_frame, diff_threshold, 255, cv2.THRESH_BINARY)[1]
            # Dilate to merge nearby white regions into solid blobs.
            kernel = np.ones((3, 6), np.uint8)
            dilated_frame = cv2.dilate(threshold_frame, kernel, iterations=3)
            # Identify the external contours of the moving blobs.
            (contours, _) = cv2.findContours(
                dilated_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Draw a bounding box around each sufficiently large contour
            # located in the area that covers the main street.
            for c in contours:
                (x, y, w, h) = cv2.boundingRect(c)
                if y < video_cutoff_point_H:
                    continue
                # contourArea() filters out small (noise) contours.
                if cv2.contourArea(c) < min_contour_area:
                    continue
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            # Debug aid: show the line that isolates the main street.
            # cv2.line(frame, (0, video_cutoff_point_H),
            #          (video_width, video_cutoff_point_H), (250, 0, 0))
            cv2.imshow('Original Video', frame)
            # To better understand the pipeline, the intermediate frames
            # can be visualised instead:
            # cv2.imshow('Original Video', gray_frame)
            # cv2.imshow('Original Video', blur_frame)
            # cv2.imshow('Original Video', delta_frame)
            # cv2.imshow('Original Video', threshold_frame)
            # cv2.imshow('Original Video', dilated_frame)
            # Stop the program by pressing 'q'.
            if cv2.waitKey(1) == ord('q'):
                print("Manually exited video by pressing q")
                break
    finally:
        # Always release the capture and close the windows, even if an
        # unexpected error occurs mid-loop (the old bare `except:`
        # silently swallowed every error as "End of video reached").
        cv2.destroyAllWindows()
        video.release()
Let's call the above function with the first video and see how well it tracks motion.
# Build a capture object for the first traffic clip and run the tracker on it
capture_one = cv2.VideoCapture('Video/Traffic_Laramie_1.mp4')
motionTrackVideo(capture_one)
End of video reached
Let's call the above function with the second video and see how well it tracks motion.
# Build a capture object for the second traffic clip and run the tracker on it
capture_two = cv2.VideoCapture('Video/Traffic_Laramie_2.mp4')
motionTrackVideo(capture_two)
End of video reached