import os
import cv2
import numpy as np
from moviepy.editor import VideoFileClip, concatenate_videoclips
from gtts import gTTS
from transformers import pipeline
import warnings
import tensorflow as tf
# Suppress TensorFlow and resource warnings
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
os.environ['HF_HUB_DISABLE_SYMLINKS_WARNING'] = '1'
tf.get_logger().setLevel('ERROR')
[Link]("ignore", category=ResourceWarning)
# Step 1: Video Scene Segmentation
def segment_video(video_path):
    cap = cv2.VideoCapture(video_path)
    scenes = []
    frame_rate = cap.get(cv2.CAP_PROP_FPS)
    prev_frame = None
    frame_number = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if prev_frame is not None:
            # Mean absolute grayscale difference between consecutive frames
            diff = cv2.absdiff(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                               cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY))
            mean_diff = np.mean(diff)
            if mean_diff > 30:  # Scene change threshold
                scenes.append(frame_number / frame_rate)
        prev_frame = frame
        frame_number += 1
    cap.release()
    # Remove near-duplicate scene timestamps
    unique_scenes = []
    min_gap = 2  # Minimum gap between scenes in seconds
    for scene in scenes:
        if not unique_scenes or scene - unique_scenes[-1] > min_gap:
            unique_scenes.append(scene)
    return unique_scenes
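# NOTE: the fixed threshold of 30 on the mean frame difference is a simple
# heuristic; fades, flashes, and fast camera motion can produce false cuts.
# A content-aware detector such as PySceneDetect is more robust if needed.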
# Step 2: Summarization
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
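# NOTE: facebook/bart-large-cnn is fetched from the Hugging Face Hub on first
# use (the checkpoint is on the order of 1.6 GB), so the initial run needs
# network access and may take a while to start.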
def summarize_text(text):
    return summarizer(text, max_length=50, min_length=25, do_sample=False)[0]['summary_text']
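# Usage sketch: summarize_text expects free-form text and returns a short
# abstractive summary. BART's encoder accepts at most ~1024 tokens, so a long
# transcript should be chunked and summarized piecewise, e.g.:
#   chunks = [transcript[i:i + 3000] for i in range(0, len(transcript), 3000)]
#   summary = " ".join(summarize_text(c) for c in chunks)
# (transcript here is a hypothetical string of narration for the whole video.)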
# Step 3: Text-to-Speech
def text_to_speech(text, lang, output_file):
    try:
        tts = gTTS(text=text, lang=lang)
        tts.save(output_file)
    except Exception as e:
        print(f"Error generating TTS for language {lang}: {e}")
# Step 4: Create Summary Video
def create_summary_video(video_path, selected_scenes, output_file):
    clips = []
    video = VideoFileClip(video_path)
    try:
        # Process scenes to ensure no overlap or invalid durations
        unique_scenes = []
        for start in selected_scenes:
            end = start + 5  # 5-second clips
            if not unique_scenes or start >= unique_scenes[-1][1]:
                unique_scenes.append((start, min(end, video.duration)))
        # Create subclips for unique scenes
        for start, end in unique_scenes:
            clips.append(video.subclip(start, end))
        # Concatenate clips and export the summary video
        final_clip = concatenate_videoclips(clips, method="compose")
        final_clip.write_videofile(output_file, codec="libx264", verbose=False)
    finally:
        # Ensure the video object is closed
        video.close()
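# NOTE: this script targets MoviePy 1.x (moviepy.editor, subclip, verbose=).
# MoviePy 2.x renamed or removed these APIs, so pin moviepy<2.0 if the
# import or the write_videofile call fails.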
# Main Function
def main():
    video_path = "videoplayback (3).mp4"  # Replace with your video file path
    output_summary = "summary_video.mp4"
    print("Segmenting video...")
    scenes = segment_video(video_path)
    print(f"Detected unique scenes: {scenes}")
    print("Summarizing scenes...")
    summarized_text = "This is a demo summary of scenes."  # Replace with actual summarization logic
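    # Sketch of plugging in real summarization, assuming a transcript of the
    # video exists (speech-to-text is out of scope here; transcribe_audio is
    # a hypothetical helper):
    #   transcript = transcribe_audio(video_path)
    #   summarized_text = summarize_text(transcript)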
print("Generating English voiceover...")
text_to_speech(summarized_text, "en", "voiceover_en.mp3")
print("Creating summary video...")
create_summary_video(video_path, scenes, output_summary)
print("Summary video created successfully!")
if __name__ == "__main__":
    main()