0% found this document useful (0 votes)
18 views2 pages

Deepface Py

The document is a Python script that processes videos to extract keyframes containing faces and their associated emotions using DeepFace and FER libraries. It initializes face detection models, analyzes emotions from detected faces, and saves the keyframes along with the full frame images to a specified output directory. The script iterates through all MP4 videos in a given directory, extracting and saving keyframes based on specified conditions.

Uploaded by

amicool8
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as TXT or PDF, or read online on Scribd
0% found this document useful (0 votes)
18 views2 pages

Deepface Py

The document is a Python script that processes videos to extract keyframes containing faces and their associated emotions using DeepFace and FER libraries. It initializes face detection models, analyzes emotions from detected faces, and saves the keyframes along with the full frame images to a specified output directory. The script iterates through all MP4 videos in a given directory, extracting and saving keyframes based on specified conditions.

Uploaded by

amicool8
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as TXT or PDF, or read online on Scribd
You are on page 1/ 2

import os

import cv2
import numpy as np
from tqdm import tqdm
from deepface import DeepFace
from fer import FER
import insightface

# Directory containing the input .mp4 videos to process.
VIDEO_DIR = r"D:\MINIPOL PICTURES\Minipoll Videos"

# Directory where the extracted keyframe images are written.
OUTPUT_DIR = r"D:\MINIPOL PICTURES\Minipoll KeyFrames"

# exist_ok=True avoids the race between an exists() check and makedirs().
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Initialize InsightFace face detection on CUDA (if installed).
insightface_detector = insightface.app.FaceAnalysis(
    name='buffalo_l',
    providers=['CUDAExecutionProvider'],
)
insightface_detector.prepare(ctx_id=0, det_size=(640, 640))

# Initialize FER with the MTCNN backend (improved accuracy).
fer_detector = FER(mtcnn=True)

def get_emotion_deepface(face_img):
    """Classify the dominant emotion in a face crop with DeepFace.

    Args:
        face_img: BGR image array containing (roughly) a single face.

    Returns:
        ``(emotion, confidence)`` on success, or ``(None, 0)`` when
        analysis fails for any reason (best-effort contract).
    """
    try:
        result = DeepFace.analyze(
            face_img,
            actions=['emotion'],
            detector_backend='retinaface',  # accurate and fast
            enforce_detection=False,
            silent=True,
        )
        # Newer DeepFace versions return a list of per-face dicts, while
        # older ones return a single dict. Without this normalization the
        # list case raised a TypeError that the except below silently
        # swallowed, so this function always returned (None, 0).
        if isinstance(result, list):
            if not result:
                return None, 0
            result = result[0]
        emotion = result['dominant_emotion']
        conf = result['emotion'][emotion]
        return emotion, conf
    except Exception:
        # Best-effort: any failure counts as "no emotion detected".
        return None, 0

def get_emotion_fer(face_img):
    """Detect the strongest emotion in *face_img* using the FER detector.

    Returns ``(emotion, confidence)`` for the first detected face, or
    ``(None, 0)`` when nothing is detected or detection raises.
    """
    try:
        detections = fer_detector.detect_emotions(face_img)
        if not detections:
            return None, 0
        scores = detections[0]["emotions"]
        best = max(scores, key=scores.get)
        return best, scores[best]
    except Exception:
        return None, 0

def extract_keyframes(video_path, save_dir, skip_frames=5, min_conf=0.6):
    """Scan a video and save keyframes where the dominant emotion changes.

    Frames are sampled roughly ``skip_frames`` times per second. For each
    sampled frame, faces are detected with InsightFace; the first face
    whose emotion (the higher-confidence of DeepFace vs FER) meets
    ``min_conf`` and differs from the previously saved emotion is written
    out, both as a face crop and as the full frame.

    Args:
        video_path: Path to the input video file.
        save_dir: Directory where JPEG keyframes are written.
        skip_frames: Approximate number of sampled frames per second.
        min_conf: Minimum emotion confidence required to save a frame.

    Returns:
        The number of keyframes saved.
    """
    frame_num = 0
    prev_emotion = None
    saved_count = 0
    video_stem = os.path.splitext(os.path.basename(video_path))[0]

    cap = cv2.VideoCapture(video_path)
    try:
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        # Guard against fps == 0 (unreadable metadata) or fps < skip_frames;
        # either made the original `frame_num % (fps // skip_frames)` divide
        # by zero and crash.
        step = max(1, fps // skip_frames)

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            if frame_num % step == 0:
                faces = insightface_detector.get(frame)
                for idx, face in enumerate(faces):
                    x1, y1, x2, y2 = [int(v) for v in face.bbox]
                    # Clamp the bounding box to the image borders.
                    x1, y1 = max(0, x1), max(0, y1)
                    x2 = min(frame.shape[1], x2)
                    y2 = min(frame.shape[0], y2)
                    face_img = frame[y1:y2, x1:x2]
                    # Ignore empty crops and faces smaller than 60x60 px.
                    if face_img.size == 0 or (y2 - y1) < 60 or (x2 - x1) < 60:
                        continue

                    # Score the crop with both backends and keep the
                    # higher-confidence result.
                    emotion_df, conf_df = get_emotion_deepface(face_img)
                    emotion_fer, conf_fer = get_emotion_fer(face_img)
                    if conf_df >= conf_fer:
                        emotion, conf = emotion_df, conf_df
                    else:
                        emotion, conf = emotion_fer, conf_fer

                    if conf >= min_conf and emotion and emotion != prev_emotion:
                        face_name = (
                            f"{video_stem}_frame{frame_num}"
                            f"_face{idx}_emo_{emotion}.jpg"
                        )
                        cv2.imwrite(os.path.join(save_dir, face_name), face_img)
                        # Also save the full frame for context.
                        full_name = (
                            f"{video_stem}_frame{frame_num}"
                            f"_emo_{emotion}_full.jpg"
                        )
                        cv2.imwrite(os.path.join(save_dir, full_name), frame)
                        prev_emotion = emotion
                        saved_count += 1
                        print(f"Saved {face_name} (conf: {conf:.2f})")
                        break  # Save at most one face per frame.

            frame_num += 1
    finally:
        # Always release the capture, even if detection raises mid-video.
        cap.release()
    return saved_count

if __name__ == "__main__":
    # Walk VIDEO_DIR and extract keyframes from every .mp4 file found.
    for filename in tqdm(os.listdir(VIDEO_DIR)):
        if not filename.lower().endswith(".mp4"):
            continue
        video_path = os.path.join(VIDEO_DIR, filename)
        print(f"Processing {filename}")
        saved = extract_keyframes(
            video_path, OUTPUT_DIR, skip_frames=3, min_conf=0.6
        )
        print(f"Extracted {saved} keyframes from {filename}")
    print("All done!")

You might also like