Coding

The document outlines a process for weapon detection using YOLO and a cascade classifier with OpenCV. It includes code for capturing video from a camera, detecting weapons in real time, and training a MobileNetV2 model for binary classification of images. The final steps involve evaluating the model's accuracy and saving it for future use.

import cv2

import torch

# Load a YOLO model via torch.hub (replace with your chosen YOLO version)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # Example using yolov5s
# YOLOv8 is loaded through the ultralytics package instead:
# from ultralytics import YOLO; model = YOLO('yolov8s.pt')

# Define classes to detect (e.g., "weapon", "person")
class_names = ["weapon", "person"]  # Replace with your model's classes
# Note: the stock yolov5s checkpoint predicts the 80 COCO classes, so this
# two-class list assumes a custom-trained weapon/person model

# Load an image or video stream (single-image path left commented out)
#image_path = "path/to/your/image.jpg"
#img = cv2.imread(image_path)

cap = cv2.VideoCapture(0)  # 0 for default camera, or path to a video file

# Detection loop
while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Convert to RGB for YOLO (OpenCV captures frames in BGR)
    img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Perform inference
    results = model(img_rgb)

    # Process results; each row of results.xyxy[0] is a tensor with
    # [x1, y1, x2, y2, confidence, class_id]
    for result in results.xyxy[0]:
        # Extract bounding box coordinates, confidence, and class ID
        x1, y1, x2, y2, confidence, class_id = result.tolist()

        # Convert to integers for drawing
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        class_id = int(class_id)

        # Draw bounding box and label
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f"{class_names[class_id]}: {confidence:.2f}"
        cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 255, 0), 2)

    # Display the annotated frame
    cv2.imshow("Weapon Detection", frame)

    # Break loop on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Clean up
cap.release()
cv2.destroyAllWindows()

YOLO
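
If you have trained your own YOLOv5 weapon detector, torch.hub can load the custom weights in place of the stock yolov5s checkpoint. A minimal sketch; 'best.pt' is a hypothetical placeholder for whatever checkpoint your training run produced:

import torch

# Load custom-trained YOLOv5 weights ('best.pt' is a placeholder path;
# substitute the checkpoint from your own training run)
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')

# Discard low-confidence detections before they reach the drawing loop
model.conf = 0.5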

import numpy as np
import cv2
import imutils
import datetime

# Load the trained gun cascade (path to your cascade XML file)
gun_cascade = cv2.CascadeClassifier('gun_cascade.xml')
camera = cv2.VideoCapture(0)
firstFrame = None
gun_exist = False

while True:
    ret, frame = camera.read()
    if frame is None:
        break
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gun = gun_cascade.detectMultiScale(gray, 1.3, 20, minSize=(100, 100))
    if len(gun) > 0:
        gun_exist = True
    for (x, y, w, h) in gun:
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
    if firstFrame is None:
        firstFrame = gray
        continue
    # Stamp the current date and time onto the frame
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S %p"),
                (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (0, 0, 255), 1)
    if gun_exist:
        print("Guns detected")
        cv2.imshow("Security Feed", frame)
        break
    else:
        cv2.imshow("Security Feed", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
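
The three positional detectMultiScale arguments (1.3, 20, minSize) control the cascade's sensitivity. The same call with named arguments is easier to tune; the values below are starting points rather than definitive settings:

# - scaleFactor: how much the image shrinks at each pyramid level
#   (closer to 1.0 = finer search, but slower)
# - minNeighbors: overlapping detections required to keep a hit
#   (higher = fewer false positives, more misses)
# - minSize: smallest object, in pixels, that will be considered
gun = gun_cascade.detectMultiScale(gray,
                                   scaleFactor=1.3,
                                   minNeighbors=20,
                                   minSize=(100, 100))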

Jupyter notebook

import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Model
from sklearn.metrics import classification_report, confusion_matrix

Setup and imports
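
Training runs considerably faster on a GPU. A quick, optional check that TensorFlow can see one; the code below works on CPU as well:

# List GPUs visible to TensorFlow; an empty list means training uses the CPU
print(tf.config.list_physical_devices('GPU'))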


IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=20,
                                   zoom_range=0.15,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.15,
                                   horizontal_flip=True,
                                   fill_mode="nearest")

val_test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    'dataset/train',
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary'
)

val_generator = val_test_datagen.flow_from_directory(
    'dataset/val',
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary'
)

test_generator = val_test_datagen.flow_from_directory(
    'dataset/test',
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    shuffle=False
)

Data preprocessing
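
With class_mode='binary', labels are assigned from the alphabetical order of the class subdirectories, so it is worth confirming which folder became class 0 and which became class 1:

# Maps each class subdirectory to its numeric label,
# e.g. {'non_weapon': 0, 'weapon': 1} (folder names depend on your dataset)
print(train_generator.class_indices)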

base_model = MobileNetV2(weights='imagenet', include_top=False, input_shape=(224, 224, 3))


base_model.trainable = False # Freeze base
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)

model = Model(inputs=base_model.input, outputs=predictions)

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

Build the model
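
To confirm the MobileNetV2 base is actually frozen, inspect the parameter counts: only the new classification head should be trainable.

# Prints the layer stack plus trainable vs. non-trainable parameter totals;
# with the base frozen, trainable params should be just the 1,281 weights
# of the final Dense(1) layer (1280 inputs + 1 bias)
model.summary()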

history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=10
)

Train the model
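
To see whether the model is overfitting, plot the accuracy curves recorded in the History object, using the matplotlib import from above:

# Plot training vs. validation accuracy across epochs
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()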

# Test accuracy
loss, accuracy = model.evaluate(test_generator)
print(f'Test accuracy: {accuracy:.2f}')

# Predictions and confusion matrix
y_pred = model.predict(test_generator)
y_pred_classes = (y_pred > 0.5).astype("int32").flatten()
y_true = test_generator.classes
y_true = test_generator.classes

print("Classification Report:")
print(classification_report(y_true, y_pred_classes, target_names=['Non-Weapon', 'Weapon']))

print("Confusion Matrix:")
print(confusion_matrix(y_true, y_pred_classes))

Evaluate the model
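
For a quicker visual read, the same confusion matrix can be rendered as a heatmap; a minimal sketch with matplotlib:

# Render the confusion matrix as a heatmap
cm = confusion_matrix(y_true, y_pred_classes)
plt.imshow(cm, cmap='Blues')
plt.xticks([0, 1], ['Non-Weapon', 'Weapon'])
plt.yticks([0, 1], ['Non-Weapon', 'Weapon'])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.colorbar()
plt.show()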

model.save('weapon_detection_model.h5')

Save the model
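
To use the saved classifier later, e.g. on frames from the OpenCV scripts above, reload it and apply it to a single preprocessed image. A minimal sketch; the frame variable is assumed to be a BGR image captured with cv2:

import cv2
import numpy as np
import tensorflow as tf

# Reload the trained classifier
model = tf.keras.models.load_model('weapon_detection_model.h5')

# Preprocess one BGR frame the same way the generators did:
# resize to 224x224, convert to RGB, rescale to [0, 1], add a batch axis
img = cv2.cvtColor(cv2.resize(frame, (224, 224)), cv2.COLOR_BGR2RGB)
img = np.expand_dims(img.astype("float32") / 255.0, axis=0)

# Sigmoid output: values above 0.5 map to class 1 (see class_indices)
prob = model.predict(img)[0][0]
print("weapon" if prob > 0.5 else "non-weapon", prob)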
