import cv2
import face_recognition
import numpy as np
# Initialize the camera (0 for the default camera)
cap = cv2.VideoCapture(0)
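# Optionally verify that the camera opened before entering the capture loop,
# so a missing device fails fast rather than inside cap.read().
if not cap.isOpened():
    raise RuntimeError("Unable to open the default camera (index 0)")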
# Load an image of the known person to compare (replace with your image path)
known_image = face_recognition.load_image_file("known_person.jpg")
known_face_encoding = face_recognition.face_encodings(known_image)[0]
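# Note: face_encodings() returns a list of encodings; indexing [0] assumes at
# least one face was detected in known_person.jpg and raises IndexError otherwise.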
# List of known face encodings (could be extended for more people)
known_face_encodings = [known_face_encoding]
known_face_names = ["Known Person"] # Name of the known person
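# To register more people, load each reference image and append its encoding
# and name, e.g. (hypothetical file name and label):
#   other_image = face_recognition.load_image_file("second_person.jpg")
#   known_face_encodings.append(face_recognition.face_encodings(other_image)[0])
#   known_face_names.append("Second Person")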
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break

    # Convert the image from BGR to RGB (face_recognition uses RGB)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Find all face locations and face encodings in the current frame
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    # Loop through each detected face
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # Compare the face encoding to the known faces
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"

        # If a match is found, use the name associated with that encoding
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]

        # Draw a rectangle around the face and label the person
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX,
                    1.0, (255, 255, 255), 1)

    # Display the resulting frame
    cv2.imshow("Face Recognition", frame)

    # Break the loop if the 'q' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close the OpenCV window
cap.release()
cv2.destroyAllWindows()
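# A possible refinement (not used above): instead of taking the first True
# result from compare_faces(), pick the known face with the smallest distance.
# Sketch, reusing the same known_face_encodings / known_face_names lists:
#   face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
#   best_match_index = np.argmin(face_distances)
#   if matches[best_match_index]:
#       name = known_face_names[best_match_index]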