# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all
# files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets
# preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved
# outside of the current session
# Distancing Parameters
class config:
    # base path to YOLO directory
    # not yet configured for this environment
    MODEL_PATH = "/kaggle/input/yolo-coco-data"
    # initialize minimum probability to filter weak detections along with
    # the threshold when applying non-maxima suppression
    MIN_CONF = 0.3
    NMS_THRESH = 0.3
    # boolean indicating if NVIDIA CUDA GPU should be used
    USE_GPU = True
    # define the minimum safe distance (in pixels) that two people can be
    # from each other
    MIN_DISTANCE = 50
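# MIN_DISTANCE is measured in pixels, so the right value depends on camera
# height and angle. A minimal calibration sketch with a hypothetical helper:
# measure an object of known real-world width in the frame and scale a
# 2 m safety threshold into pixels.
def pixel_distance_threshold(reference_width_px, reference_width_m, safe_distance_m=2.0):
    # pixels-per-metre from the reference object, applied to the safe distance
    return int(safe_distance_m * reference_width_px / reference_width_m)
# e.g. a 0.5 m wide doorway spanning 60 px gives a threshold of ~240 px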
# Detection
def detect_people(frame, net, ln, personIdx=0):
    # grab the dimensions of the frame and initialize the list of
    # results
    (H, W) = frame.shape[:2]
    results = []
    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)
    # initialize our lists of detected bounding boxes, centroids, and
    # confidences, respectively
    boxes = []
    centroids = []
    confidences = []
    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter detections by (1) ensuring that the object
            # detected was a person and (2) that the minimum
            # confidence is met
            if classID == personIdx and confidence > config.MIN_CONF:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and
                # height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates,
                # centroids, and confidences
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))
    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, config.MIN_CONF, config.NMS_THRESH)
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in np.array(idxs).flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # update our results list to consist of the person
            # prediction probability, bounding box coordinates,
            # and the centroid
            r = (confidences[i], (x, y, x + w, y + h), centroids[i])
            results.append(r)
    # return the list of results
    return results
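# A minimal sketch of calling detect_people on a single image (the file name
# is hypothetical; net, ln, and LABELS are created further down):
# img = cv2.imread("example.jpg")
# people = detect_people(img, net, ln, personIdx=LABELS.index("person"))
# for (prob, bbox, centroid) in people:
#     print(prob, bbox, centroid)  # each result: (confidence, (x1, y1, x2, y2), (cX, cY))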
# USAGE
# python social_distance_detector.py --input SamVideo.mp4
# python social_distance_detector.py --input SamVideo.mp4 --output output.avi
# import the necessary packages
## from pyimagesearch import social_distancing_config as config
## from pyimagesearch.detection import detect_people
from scipy.spatial import distance as dist
import imutils
'''
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="",
help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="",
help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
help="whether or not output frame should be displayed")
args = vars(ap.parse_args())
'''
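# the argparse block above is disabled in the notebook, so set the input and
# output paths directly; both values are assumptions for this environment and
# should be adjusted to wherever SamVideo.mp4 actually lives
input_data = "/kaggle/input/samvideo/SamVideo.mp4"  # hypothetical dataset path
output_data = "/kaggle/working/output.avi"          # assumed output location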
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([config.MODEL_PATH, "yolov3.weights"])
configPath = os.path.sep.join([config.MODEL_PATH, "yolov3.cfg"])
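# a quick sanity check: the file names above follow the standard yolo-coco-data
# layout, which is an assumption; fail early if your copy of the dataset differs
for p in (labelsPath, weightsPath, configPath):
    assert os.path.exists(p), "missing YOLO file: " + p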
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# check if we are going to use GPU
if config.USE_GPU:
    # the stock OpenCV build here lacks CUDA support, so use the default
    # backend with an OpenCL FP16 target for GPU acceleration instead
    print("[INFO] setting preferable backend and target to OpenCL FP16...")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns plain indices on newer OpenCV builds and
# one-element arrays on older ones, so flatten through NumPy to handle both
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
# initialize the video stream and pointer to output video file
print("[INFO] accessing video stream...")
vs = cv2.VideoCapture(input_data if input_data else 0)
writer = None
# loop over the frames from the video stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()
    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break
    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    results = detect_people(frame, net, ln, personIdx=LABELS.index("person"))
    # initialize the set of indexes that violate the minimum social
    # distance
    violate = set()
    # ensure there are *at least* two people detections (required in
    # order to compute our pairwise distance maps)
    if len(results) >= 2:
        # extract all centroids from the results and compute the
        # Euclidean distances between all pairs of the centroids
        centroids = np.array([r[2] for r in results])
        D = dist.cdist(centroids, centroids, metric="euclidean")
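        # D is an N x N symmetric matrix with a zero diagonal, where D[i, j]
        # is the pixel distance between centroids i and j, so scanning only
        # the upper triangle covers each unique pair exactly once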
        # loop over the upper triangle of the distance matrix
        for i in range(0, D.shape[0]):
            for j in range(i + 1, D.shape[1]):
                # check to see if the distance between any two
                # centroid pairs is less than the configured number
                # of pixels
                if D[i, j] < config.MIN_DISTANCE:
                    # update our violation set with the indexes of
                    # the centroid pairs
                    violate.add(i)
                    violate.add(j)
    # loop over the results
    for (i, (prob, bbox, centroid)) in enumerate(results):
        # extract the bounding box and centroid coordinates, then
        # initialize the color of the annotation
        (startX, startY, endX, endY) = bbox
        (cX, cY) = centroid
        color = (0, 255, 0)
        # if the index pair exists within the violation set, then
        # update the color
        if i in violate:
            color = (0, 0, 255)
        # draw (1) a bounding box around the person and (2) the
        # centroid coordinates of the person
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        cv2.circle(frame, (cX, cY), 5, color, 1)
    # draw the total number of social distancing violations on the
    # output frame
    text = "Social Distancing Violations: {}".format(len(violate))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
    # check to see if the output frame should be displayed to our
    # screen (cv2.imshow has no display to attach to in a Kaggle
    # kernel, so this branch is disabled here)
    #if args["display"] > 0:
    if False:
        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # if an output video file path has been supplied and the video
    # writer has not been initialized, do so now
    if output_data != "" and writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(output_data, fourcc, 25,
            (frame.shape[1], frame.shape[0]), True)
    # if the video writer is not None, write the frame to the output
    # video file
    if writer is not None:
        writer.write(frame)
print('[INFO] done')
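# release the stream and flush the video writer so the file on disk is
# complete before the playback cell below opens it
vs.release()
if writer is not None:
    writer.release()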
# Sample Run
from IPython.display import HTML
from base64 import b64encode
vid1 = open(output_data, 'rb').read()
data_url = "data:video/avi;base64," + b64encode(vid1).decode()
HTML("""
<video width=600 controls>
<source src="%s" type="video/avi">
</video>
""" % data_url)