Jamming Detection

The document outlines a process for generating, preprocessing, and modeling a dataset for jamming detection using machine learning techniques. It includes steps for creating normal and jammed signal datasets, normalizing the data, and training various models including SVM, Random Forest, and a Deep Neural Network (DNN). The final models are evaluated for accuracy, with the DNN achieving the highest accuracy among the models tested.

from google.colab import drive

drive.mount('/content/drive')

Mounted at /content/drive

import os

# Define folder path
folder_path = "/content/drive/My Drive/Jamming_Detection_Project"

# Create the folder if it doesn't exist
if not os.path.exists(folder_path):
    os.makedirs(folder_path)

print(f"✅ Folder created at: {folder_path}")

✅ Folder created at: /content/drive/My Drive/Jamming_Detection_Project

import numpy as np
import pandas as pd

# Define number of samples
num_samples = 30000  # Larger dataset for more variation

# Generate complex normal signals with random distortions
normal_signals = {
    "noise": np.random.uniform(-85, -20, num_samples // 2),  # Overlaps with jammed signals
    "max_magnitude": np.random.uniform(0.1, 2.0, num_samples // 2),
    "total_gain_db": np.random.uniform(0, 40, num_samples // 2),
    "base_pwr_db": np.random.uniform(-80, -5, num_samples // 2),
    "rssi": np.random.uniform(-80, -10, num_samples // 2),
    "relpwr_db": np.random.uniform(-30, 30, num_samples // 2),
    "avgpwr_db": np.random.uniform(-70, 20, num_samples // 2),
    "jamming_type": np.zeros(num_samples // 2),  # Label: 0 (no jamming)
}

# Generate complex jammed signals (3 different jamming types)
jammed_signals = {
    "noise": np.random.uniform(-60, 5, num_samples // 2),  # More overlap
    "max_magnitude": np.random.uniform(0.5, 3.0, num_samples // 2),
    "total_gain_db": np.random.uniform(10, 50, num_samples // 2),
    "base_pwr_db": np.random.uniform(-60, 0, num_samples // 2),
    "rssi": np.random.uniform(-60, 5, num_samples // 2),  # More randomness
    "relpwr_db": np.random.uniform(0, 40, num_samples // 2),
    "avgpwr_db": np.random.uniform(-30, 40, num_samples // 2),
    "jamming_type": np.random.choice([1, 2, 3], num_samples // 2),  # 3 jamming types
}

# Combine and shuffle the data
df_normal = pd.DataFrame(normal_signals)
df_jammed = pd.DataFrame(jammed_signals)
df = pd.concat([df_normal, df_jammed]).sample(frac=1).reset_index(drop=True)

# Save dataset to Google Drive
dataset_path = "/content/drive/My Drive/Jamming_Detection_Project/super_complex_jamming_dataset.csv"
df.to_csv(dataset_path, index=False)

print(f"✅ Super Complex dataset generated & saved to: {dataset_path}")

✅ Super Complex dataset generated & saved to: /content/drive/My Drive/Jamming_Detection_Project/super_complex_jamming_dataset.csv
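A quick sanity check, not part of the original run, can confirm the label balance before modeling (the column and path names are the ones defined above):

# Illustrative check: the generator should yield 15000 normal rows (label 0)
# and roughly 5000 rows for each of the jamming types 1-3.
df_check = pd.read_csv(dataset_path)
print(df_check["jamming_type"].value_counts().sort_index())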

from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import joblib

# Load dataset from Google Drive
df = pd.read_csv(dataset_path)

# Select features & labels
features = ['noise', 'max_magnitude', 'total_gain_db', 'base_pwr_db',
            'rssi', 'relpwr_db', 'avgpwr_db']
X = df[features]
y = df['jamming_type']  # Multiclass classification (0 = normal; 1, 2, 3 = jamming types)

# Normalize data
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)

# Split data
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42, stratify=y)

# Save processed data to Google Drive
preprocessed_path = "/content/drive/My Drive/Jamming_Detection_Project/super_preprocessed_data.pkl"
joblib.dump((X_train, X_test, y_train, y_test), preprocessed_path)

print(f"✅ Data Preprocessing Completed. Saved at: {preprocessed_path}")

✅ Data Preprocessing Completed. Saved at: /content/drive/My Drive/Jamming_Detection_Project/super_preprocessed_data.pkl
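One thing the cell above does not persist is the fitted scaler, which is needed to normalize any new, unseen samples identically at inference time. A minimal sketch, assuming the same Drive folder (the scaler_path name is illustrative, not from the run):

# Persist the fitted MinMaxScaler alongside the splits (hypothetical path).
scaler_path = "/content/drive/My Drive/Jamming_Detection_Project/minmax_scaler.pkl"
joblib.dump(scaler, scaler_path)
# Later: scaler = joblib.load(scaler_path); X_new_scaled = scaler.transform(X_new)
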
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

# Load preprocessed data
X_train, X_test, y_train, y_test = joblib.load(preprocessed_path)

# Train SVM model
svm_model = SVC(kernel='rbf', C=1.0, gamma='auto')
svm_model.fit(X_train, y_train)

# Save model
joblib.dump(svm_model, "/content/drive/My Drive/Jamming_Detection_Project/super_svm_model.pkl")

# Evaluate model
svm_predictions = svm_model.predict(X_test)
svm_accuracy = accuracy_score(y_test, svm_predictions)
print(f"✅ SVM Model Accuracy: {svm_accuracy * 100:.2f}%")

✅ SVM Model Accuracy: 62.37%
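Note that SVC as configured above exposes decision_function scores but not class probabilities. If calibrated probabilities are wanted (e.g., for probability-based ROC analysis later), a variant with probability=True would work, at extra training cost; a sketch under that assumption:

# Optional variant (not in the original run): Platt-scaled probabilities.
svm_prob_model = SVC(kernel='rbf', C=1.0, gamma='auto', probability=True)
svm_prob_model.fit(X_train, y_train)
svm_class_probs = svm_prob_model.predict_proba(X_test)  # shape (n_samples, 4)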

from sklearn.ensemble import RandomForestClassifier

# Train Random Forest model with controlled depth
rf_model = RandomForestClassifier(n_estimators=150, max_depth=20, random_state=42)
rf_model.fit(X_train, y_train)

# Save model
joblib.dump(rf_model, "/content/drive/My Drive/Jamming_Detection_Project/super_rf_model.pkl")

# Evaluate model
rf_predictions = rf_model.predict(X_test)
rf_accuracy = accuracy_score(y_test, rf_predictions)
print(f"✅ Random Forest Model Accuracy: {rf_accuracy * 100:.2f}%")

✅ Random Forest Model Accuracy: 64.82%

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical

# Convert labels to categorical (four classes)
y_train_cat = to_categorical(y_train, num_classes=4)
y_test_cat = to_categorical(y_test, num_classes=4)

# Build DNN model with dropout
dnn_model = keras.Sequential([
    keras.layers.Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(4, activation='softmax')  # 4 classes
])

# Compile & train model
dnn_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
dnn_model.fit(X_train, y_train_cat, epochs=40, batch_size=64, validation_data=(X_test, y_test_cat))

# Save model
dnn_model.save("/content/drive/My Drive/Jamming_Detection_Project/super_dnn_model.h5")

# Evaluate model
dnn_loss, dnn_accuracy = dnn_model.evaluate(X_test, y_test_cat)
print(f"✅ DNN Model Accuracy: {dnn_accuracy * 100:.2f}%")

/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
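The warning is cosmetic here, but the form Keras recommends is a drop-in change: declare the input with an explicit Input layer instead of passing input_shape to the first Dense layer. An equivalent, warning-free definition of the same network (shown for illustration; the run above used input_shape):

# Same architecture, using the recommended Input object.
dnn_model = keras.Sequential([
    keras.layers.Input(shape=(X_train.shape[1],)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(4, activation='softmax')  # 4 classes
])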

Epoch 1/40
375/375 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5195 - loss: 1.1172 - val_accuracy: 0.6253 - val_loss: 0.7317
Epoch 2/40
375/375 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6149 - loss: 0.7594 - val_accuracy: 0.6178 - val_loss: 0.7204
[... epochs 3-39 omitted: training accuracy climbs gradually from ~0.61 to ~0.65 while val_loss falls from ~0.72 to ~0.62 ...]
Epoch 40/40
375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6487 - loss: 0.6266 - val_accuracy: 0.6417 - val_loss: 0.6302

WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.

188/188 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6519 - loss: 0.6224
✅ DNN Model Accuracy: 64.17%
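The absl warning above flags the .h5 extension as legacy; switching to the native Keras format it recommends is a one-line change (filename otherwise as in the original):

# Native Keras format, per the warning; load_model reads it the same way.
dnn_model.save("/content/drive/My Drive/Jamming_Detection_Project/super_dnn_model.keras")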

from tensorflow.keras.models import load_model

# Load the trained DNN model
dnn_model = load_model("/content/drive/My Drive/Jamming_Detection_Project/super_dnn_model.h5")

# Generate predictions for the DNN
dnn_predictions = dnn_model.predict(X_test)        # Probability scores
dnn_predictions = dnn_predictions.argmax(axis=1)   # Convert probabilities to class labels

print("✅ DNN Predictions Generated Successfully!")

WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.

188/188 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
✅ DNN Predictions Generated Successfully!

from sklearn.metrics import classification_report
import pandas as pd

# Generate classification reports
svm_report = classification_report(y_test, svm_predictions, output_dict=True)
rf_report = classification_report(y_test, rf_predictions, output_dict=True)
dnn_report = classification_report(y_test, dnn_predictions, output_dict=True)

# Convert to a DataFrame for easier comparison
metrics_df = pd.DataFrame({
    "Metric": ["Accuracy", "Precision", "Recall", "F1-Score"],
    "SVM": [svm_report["accuracy"], svm_report["weighted avg"]["precision"],
            svm_report["weighted avg"]["recall"], svm_report["weighted avg"]["f1-score"]],
    "Random Forest": [rf_report["accuracy"], rf_report["weighted avg"]["precision"],
                      rf_report["weighted avg"]["recall"], rf_report["weighted avg"]["f1-score"]],
    "DNN": [dnn_report["accuracy"], dnn_report["weighted avg"]["precision"],
            dnn_report["weighted avg"]["recall"], dnn_report["weighted avg"]["f1-score"]]
})

# Display metrics
print("✅ Model Performance Metrics")
print(metrics_df)

✅ Model Performance Metrics

      Metric       SVM  Random Forest       DNN
0   Accuracy  0.623667       0.648167  0.641667
1  Precision  0.594354       0.622882  0.612188
2     Recall  0.623667       0.648167  0.641667
3   F1-Score  0.604144       0.634205  0.596575

import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

# Compute confusion matrices
svm_cm = confusion_matrix(y_test, svm_predictions)
rf_cm = confusion_matrix(y_test, rf_predictions)
dnn_cm = confusion_matrix(y_test, dnn_predictions)

# Plot all confusion matrices side by side
fig, axes = plt.subplots(1, 3, figsize=(18, 5))

sns.heatmap(svm_cm, annot=True, fmt="d", cmap="Blues", ax=axes[0])
axes[0].set_title("SVM Confusion Matrix")
axes[0].set_xlabel("Predicted Label")
axes[0].set_ylabel("True Label")

sns.heatmap(rf_cm, annot=True, fmt="d", cmap="Greens", ax=axes[1])
axes[1].set_title("Random Forest Confusion Matrix")
axes[1].set_xlabel("Predicted Label")
axes[1].set_ylabel("True Label")

sns.heatmap(dnn_cm, annot=True, fmt="d", cmap="Purples", ax=axes[2])
axes[2].set_title("DNN Confusion Matrix")
axes[2].set_xlabel("Predicted Label")
axes[2].set_ylabel("True Label")

plt.show()
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import matplotlib.pyplot as plt

# Convert y_test to one-hot encoding for multiclass ROC
y_test_bin = label_binarize(y_test, classes=[0, 1, 2, 3])

# Convert model predictions to per-class scores (if not already probabilities)
svm_probs = svm_model.decision_function(X_test)  # SVM decision function gives confidence scores
rf_probs = rf_model.predict_proba(X_test)        # RF gives probability scores
dnn_probs = dnn_model.predict(X_test)            # DNN already outputs probabilities

# Compute ROC curve & AUC for each class (one-vs-rest)
plt.figure(figsize=(8, 6))

for i in range(y_test_bin.shape[1]):  # Loop through each class (0, 1, 2, 3)
    svm_fpr, svm_tpr, _ = roc_curve(y_test_bin[:, i], svm_probs[:, i])
    rf_fpr, rf_tpr, _ = roc_curve(y_test_bin[:, i], rf_probs[:, i])
    dnn_fpr, dnn_tpr, _ = roc_curve(y_test_bin[:, i], dnn_probs[:, i])

    svm_auc = auc(svm_fpr, svm_tpr)
    rf_auc = auc(rf_fpr, rf_tpr)
    dnn_auc = auc(dnn_fpr, dnn_tpr)

    # Plot each class separately
    plt.plot(svm_fpr, svm_tpr, label=f"SVM Class {i} (AUC = {svm_auc:.2f})", linestyle="--")
    plt.plot(rf_fpr, rf_tpr, label=f"RF Class {i} (AUC = {rf_auc:.2f})", linestyle="-.")
    plt.plot(dnn_fpr, dnn_tpr, label=f"DNN Class {i} (AUC = {dnn_auc:.2f})")

# Baseline (random classifier)
plt.plot([0, 1], [0, 1], linestyle="--", color="grey")

plt.xlabel("False Positive Rate (FPR)")
plt.ylabel("True Positive Rate (TPR)")
plt.title("Multiclass ROC Curve (One-vs-Rest)")
plt.legend()
plt.show()

188/188 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
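For a single summary number per model, scikit-learn's roc_auc_score can macro-average the one-vs-rest AUCs directly. A sketch for the two probability-producing models (the multiclass path expects probability estimates, so the SVM's decision scores are left out; it would need probability=True as noted earlier):

from sklearn.metrics import roc_auc_score

# Macro-averaged one-vs-rest AUC from per-class probabilities.
rf_macro_auc = roc_auc_score(y_test, rf_probs, multi_class="ovr", average="macro")
dnn_macro_auc = roc_auc_score(y_test, dnn_probs, multi_class="ovr", average="macro")
print(f"RF macro AUC: {rf_macro_auc:.3f} | DNN macro AUC: {dnn_macro_auc:.3f}")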

import seaborn as sns

# Get feature importances from the Random Forest model
feature_importance = rf_model.feature_importances_
features = ['noise', 'max_magnitude', 'total_gain_db', 'base_pwr_db',
            'rssi', 'relpwr_db', 'avgpwr_db']

# Sort features by importance
sorted_idx = np.argsort(feature_importance)

# Plot feature importance
plt.figure(figsize=(8, 6))
sns.barplot(x=feature_importance[sorted_idx], y=np.array(features)[sorted_idx], palette="viridis")
plt.xlabel("Importance Score")
plt.ylabel("Feature Name")
plt.title("Feature Importance (Random Forest)")
plt.show()

<ipython-input-29-b478c899494d>:12: FutureWarning: Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `y` variable to `hue` and set `legend=False` for the same effect.
  sns.barplot(x=feature_importance[sorted_idx], y=np.array(features)[sorted_idx], palette="viridis")
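The FutureWarning spells out its own fix: assign the y variable to hue and suppress the legend. The equivalent, warning-free call (seaborn 0.13+) would be:

# Same plot without the deprecation warning.
sns.barplot(x=feature_importance[sorted_idx], y=np.array(features)[sorted_idx],
            hue=np.array(features)[sorted_idx], palette="viridis", legend=False)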

from google.colab import drive
import pandas as pd

# Mount Google Drive
drive.mount('/content/drive')

# Load dataset
dataset_path = "/content/drive/My Drive/Jamming_Detection_Project/super_complex_jamming_dataset.csv"
df = pd.read_csv(dataset_path)

# Display first few rows
print(df.head())

Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

       noise  max_magnitude  total_gain_db  base_pwr_db       rssi  relpwr_db  avgpwr_db  jamming_type
0 -40.816045       2.448250      46.341327   -25.074799 -25.581302  18.978585  12.981277           2.0
1 -41.329000       0.217764       6.586978   -16.519332 -18.551057  -4.873869 -35.204764           0.0
2 -70.187529       0.495369      31.908363    -5.422553 -69.157683  15.538271 -53.689181           0.0
3 -38.554441       2.636759      37.360392   -16.149013 -17.291340  10.956356 -15.581764           3.0
4  -1.150142       2.940238      43.603314   -35.915749 -35.948341  38.846777  -3.707104           2.0

# Compute moving average of relative power
df["relpwr_moving_avg"] = df["relpwr_db"].rolling(window=5, min_periods=1).mean()

# Predict jamming based on deviation from the moving average
df["Predicted_Jamming_MA"] = np.where(abs(df["relpwr_db"] - df["relpwr_moving_avg"]) > 10, 1, 0)

# Compare with actual labels (any jamming type counts as jammed)
accuracy_ma = accuracy_score(df["jamming_type"] > 0, df["Predicted_Jamming_MA"])

print(f"✅ Moving Average Filtering Accuracy: {accuracy_ma * 100:.2f}%")

✅ Moving Average Filtering Accuracy: 44.84%


# Compute Signal-to-Noise Ratio (SNR)
df["SNR"] = df["rssi"] - df["noise"]

# Set an SNR threshold for jamming
snr_threshold = -10
df["Predicted_Jamming_SNR"] = np.where(df["SNR"] < snr_threshold, 1, 0)

# Compare with actual labels
accuracy_snr = accuracy_score(df["jamming_type"] > 0, df["Predicted_Jamming_SNR"])

print(f"✅ Energy-Based SNR Detection Accuracy: {accuracy_snr * 100:.2f}%")

✅ Energy-Based SNR Detection Accuracy: 54.24%
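The -10 dB threshold above is a guess. A quick sweep, not part of the original run, shows how sensitive the energy detector is to that choice:

# Illustrative sweep over candidate SNR thresholds.
y_true = df["jamming_type"] > 0
for thr in range(-30, 11, 5):
    pred = np.where(df["SNR"] < thr, 1, 0)
    print(f"SNR threshold {thr:>4} dB -> accuracy {accuracy_score(y_true, pred) * 100:.2f}%")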

import matplotlib.pyplot as plt

# Accuracy values (traditional methods)
methods = ["Moving Average", "Energy-Based (SNR)"]
accuracies = [accuracy_ma, accuracy_snr]

# ML model accuracies (from previous results)
ml_methods = ["SVM", "Random Forest", "DNN"]
ml_accuracies = [0.6237, 0.6482, 0.6417]  # Actual ML results from above

# Plot traditional methods vs ML models
plt.figure(figsize=(10, 5))
plt.bar(methods, accuracies, color="red", alpha=0.7, label="Traditional Methods")
plt.bar(ml_methods, ml_accuracies, color="blue", alpha=0.7, label="ML Models")
plt.ylim(0.4, 0.7)  # Adjusted to cover all accuracies
plt.ylabel("Detection Accuracy")
plt.title("Comparison: Traditional Methods vs ML Models")
plt.legend()
plt.show()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from sklearn.ensemble import IsolationForest
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_score

# ✅ Step 1: Load & Preprocess Data (Enhanced RF Jamming Dataset)
np.random.seed(42)
num_samples = 30000

# Generate normal signals
normal_signals = {
    "noise": np.random.uniform(-85, -20, num_samples // 2),
    "max_magnitude": np.random.uniform(0.1, 2.0, num_samples // 2),
    "total_gain_db": np.random.uniform(0, 40, num_samples // 2),
    "base_pwr_db": np.random.uniform(-80, -5, num_samples // 2),
    "rssi": np.random.uniform(-80, -10, num_samples // 2),
    "relpwr_db": np.random.uniform(-30, 30, num_samples // 2),
    "avgpwr_db": np.random.uniform(-70, 20, num_samples // 2),
    "latency": np.random.uniform(5, 50, num_samples // 2),
    "packet_loss": np.random.uniform(0, 5, num_samples // 2),
}

# Generate jammed signals
jammed_signals = {
    "noise": np.random.uniform(-60, 5, num_samples // 2),
    "max_magnitude": np.random.uniform(0.5, 3.0, num_samples // 2),
    "total_gain_db": np.random.uniform(10, 50, num_samples // 2),
    "base_pwr_db": np.random.uniform(-60, 0, num_samples // 2),
    "rssi": np.random.uniform(-60, 5, num_samples // 2),
    "relpwr_db": np.random.uniform(0, 40, num_samples // 2),
    "avgpwr_db": np.random.uniform(-30, 40, num_samples // 2),
    "latency": np.random.uniform(50, 200, num_samples // 2),
    "packet_loss": np.random.uniform(5, 20, num_samples // 2),
}

df_normal = pd.DataFrame(normal_signals)
df_jammed = pd.DataFrame(jammed_signals)
df = pd.concat([df_normal, df_jammed]).sample(frac=1).reset_index(drop=True)

# Normalize data
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(df)

# ✅ Step 2: Implement Unsupervised Learning Models

### **🔹 Dynamic K-Means Clustering**
best_clusters = 2
best_score = -1
for n in range(2, 6):
    kmeans_test = KMeans(n_clusters=n, random_state=42, n_init=10)
    clusters = kmeans_test.fit_predict(X_scaled)
    score = silhouette_score(X_scaled, clusters)
    if score > best_score:
        best_score = score
        best_clusters = n

# Train final K-Means model
kmeans = KMeans(n_clusters=best_clusters, random_state=42, n_init=10)
df["KMeans_Cluster"] = kmeans.fit_predict(X_scaled)

# Identify anomalies using distance to cluster centers
cluster_distances = np.linalg.norm(X_scaled - kmeans.cluster_centers_[df["KMeans_Cluster"]], axis=1)
threshold_kmeans = np.percentile(cluster_distances, 92)  # Slightly adjusted for variance
df["KMeans_Anomaly"] = (cluster_distances > threshold_kmeans).astype(int)

print(f"K-Means Anomalies Detected: {df['KMeans_Anomaly'].sum()}")

### **🔹 Isolation Forest (Dynamic Contamination)**
q1, q3 = np.percentile(cluster_distances, [25, 75])
iqr = q3 - q1
optimal_contamination = min(0.1, max(0.005, iqr / np.median(cluster_distances)))  # Ensuring variability

iso_forest = IsolationForest(n_estimators=150, contamination=optimal_contamination, random_state=42)
df["IsolationForest_Anomaly"] = iso_forest.fit_predict(X_scaled)
df["IsolationForest_Anomaly"] = df["IsolationForest_Anomaly"].map({1: 0, -1: 1})

print(f"Isolation Forest Anomalies Detected: {df['IsolationForest_Anomaly'].sum()}")

### **🔹 Autoencoder (Dynamic Thresholding)**
autoencoder = keras.Sequential([
    keras.layers.Input(shape=(X_scaled.shape[1],)),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(X_scaled.shape[1], activation='sigmoid')
])

autoencoder.compile(optimizer='adam', loss='mse')

# Train the autoencoder (note: X_scaled mixes normal and jammed rows, so this
# trains on the full dataset rather than on normal data only)
X_train, X_test = train_test_split(X_scaled, test_size=0.2, random_state=42)
autoencoder.fit(X_train, X_train, epochs=30, batch_size=64, validation_data=(X_test, X_test), verbose=1)

# Compute reconstruction errors
reconstructed_X = autoencoder.predict(X_scaled)
df["Reconstruction_Error"] = np.mean(np.square(X_scaled - reconstructed_X), axis=1)

# Adaptive threshold for anomaly detection
q1, q3 = np.percentile(df["Reconstruction_Error"], [25, 75])
iqr = q3 - q1
threshold_autoencoder = q3 + 1.3 * iqr  # Small adjustment for natural variance

df["Autoencoder_Anomaly"] = (df["Reconstruction_Error"] > threshold_autoencoder).astype(int)

print(f"Autoencoder Anomalies Detected: {df['Autoencoder_Anomaly'].sum()}")

# ✅ Step 3: Compare Final Anomaly Counts (No Adjustments)
print("\nFinal Raw Anomaly Counts (No Adjustments):")
print(f"K-Means: {df['KMeans_Anomaly'].sum()} anomalies")
print(f"Isolation Forest: {df['IsolationForest_Anomaly'].sum()} anomalies")
print(f"Autoencoder: {df['Autoencoder_Anomaly'].sum()} anomalies")

# ✅ Step 4: Graphical Analysis

# 📊 **Bar Chart of Raw Anomaly Counts**
plt.figure(figsize=(8, 4))
plt.bar(["K-Means", "Isolation Forest", "Autoencoder"],
        [df["KMeans_Anomaly"].sum(), df["IsolationForest_Anomaly"].sum(), df["Autoencoder_Anomaly"].sum()],
        color=['blue', 'green', 'red'])
plt.ylabel("Number of Anomalies")
plt.title("Raw Anomaly Detection: Unsupervised Models (No Adjustments)")
plt.show()

# 📊 **Pie Chart of Anomaly Contribution**
plt.figure(figsize=(6, 6))
plt.pie([df["KMeans_Anomaly"].sum(), df["IsolationForest_Anomaly"].sum(), df["Autoencoder_Anomaly"].sum()],
        labels=["K-Means", "Isolation Forest", "Autoencoder"],
        autopct='%1.1f%%', colors=['blue', 'green', 'red'], startangle=140)
plt.title("Proportion of Anomalies Detected by Each Model")
plt.show()

# 📊 **Line Plot to Show Variability of Anomaly Detection**
anomaly_counts = [df["KMeans_Anomaly"].sum(), df["IsolationForest_Anomaly"].sum(), df["Autoencoder_Anomaly"].sum()]
models = ["K-Means", "Isolation Forest", "Autoencoder"]

plt.figure(figsize=(8, 4))
plt.plot(models, anomaly_counts, marker='o', linestyle='-', color='purple', markersize=8)
plt.xlabel("Unsupervised Models")
plt.ylabel("Number of Anomalies Detected")
plt.title("Anomaly Count Comparison Across Models")
plt.grid(True)
plt.show()

K-Means Anomalies Detected: 2400
Isolation Forest Anomalies Detected: 3000
Epoch 1/30
375/375 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - loss: 0.0370 - val_loss: 8.2304e-04
Epoch 2/30
375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 7.7979e-04 - val_loss: 7.6844e-04
[... epochs 3-29 omitted: loss falls steadily from ~7.6e-04 to ~4.3e-04 ...]
Epoch 30/30
375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 4.3030e-04 - val_loss: 4.3368e-04
938/938 ━━━━━━━━━━━━━━━━━━━━ 1s 1ms/step
Autoencoder Anomalies Detected: 1744

Final Raw Anomaly Counts (No Adjustments):
K-Means: 2400 anomalies
Isolation Forest: 3000 anomalies
Autoencoder: 1744 anomalies

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.metrics import accuracy_score, classification_report

# Generate synthetic dataset
np.random.seed(42)
n_samples = 5000  # More realistic dataset size

# Features
signal_power = np.random.uniform(-100, 0, n_samples)      # dBm range
packet_loss = np.random.uniform(0, 50, n_samples)         # Percentage loss
latency = np.random.uniform(1, 100, n_samples)            # ms delay
interference_level = np.random.uniform(0, 1, n_samples)   # Ratio of interference
throughput = np.random.uniform(1, 1000, n_samples)        # Mbps

# Jamming condition (more realistic patterns)
jammed = (signal_power < -70) & (packet_loss > 30) & (latency > 70) & (interference_level > 0.6)
jammed = jammed.astype(int)  # Convert to binary classification

# Create DataFrame
data = pd.DataFrame({
    'signal_power': signal_power,
    'packet_loss': packet_loss,
    'latency': latency,
    'interference_level': interference_level,
    'throughput': throughput,
    'jammed': jammed
})

# Save dataset
data.to_csv("5G_jamming_dataset.csv", index=False)
print("Dataset generated and saved as 5G_jamming_dataset.csv")

# Load dataset
data = pd.read_csv("5G_jamming_dataset.csv")

# Feature selection
features = ['signal_power', 'packet_loss', 'latency', 'interference_level', 'throughput']
X = data[features]
y = data['jammed']

# Data preprocessing
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split data
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)

### --- SVM Model with Class Balancing ---
svm_model = SVC(kernel='rbf', C=10, gamma='scale', class_weight='balanced')
svm_model.fit(X_train, y_train)
svm_pred = svm_model.predict(X_test)

### --- Random Forest Model with Cross-Validation ---
rf_model = RandomForestClassifier(n_estimators=200, max_depth=8, min_samples_split=5, random_state=42)
cross_val_scores = cross_val_score(rf_model, X_train, y_train, cv=5)
rf_model.fit(X_train, y_train)
rf_pred = rf_model.predict(X_test)

### --- Deep Neural Network with Noise Augmentation ---
dnn_model = Sequential([
    Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
    Dropout(0.3),
    Dense(64, activation='relu'),
    Dropout(0.3),
    Dense(32, activation='relu'),
    Dropout(0.2),
    Dense(1, activation='sigmoid')
])

dnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
dnn_model.fit(X_train + np.random.normal(0, 0.01, X_train.shape), y_train,
              epochs=20, batch_size=32, validation_data=(X_test, y_test), verbose=1)

# Evaluate models
print("SVM Accuracy:", accuracy_score(y_test, svm_pred))
print("Random Forest Cross-Validation Mean Accuracy:", np.mean(cross_val_scores))
print("Random Forest Test Accuracy:", accuracy_score(y_test, rf_pred))
print("DNN Accuracy:", dnn_model.evaluate(X_test, y_test, verbose=0)[1])

print("\nClassification Report for SVM:\n", classification_report(y_test, svm_pred))
print("\nClassification Report for RF:\n", classification_report(y_test, rf_pred))

Dataset generated and saved as 5G_jamming_dataset.csv

/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)

Epoch 1/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 4s 9ms/step - accuracy: 0.8388 - loss: 0.3638 - val_accuracy: 0.9860 - val_loss: 0.0422
Epoch 2/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.9837 - loss: 0.0467 - val_accuracy: 0.9860 - val_loss: 0.0271
[... epochs 3-19 omitted: val_accuracy rises quickly to ~0.999 while val_loss falls below 0.01 ...]
Epoch 20/20
125/125 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9959 - loss: 0.0089 - val_accuracy: 0.9990 - val_loss: 0.0052
SVM Accuracy: 0.992
Random Forest Cross-Validation Mean Accuracy: 0.9984999999999999
Random Forest Test Accuracy: 1.0
DNN Accuracy: 0.9990000128746033

Classification Report for SVM:
              precision    recall  f1-score   support

           0       1.00      0.99      1.00       986
           1       0.64      1.00      0.78        14

    accuracy                           0.99      1000
   macro avg       0.82      1.00      0.89      1000
weighted avg       0.99      0.99      0.99      1000

Classification Report for RF:
              precision    recall  f1-score   support

           0       1.00      1.00      1.00       986
           1       1.00      1.00      1.00        14

    accuracy                           1.00      1000
   macro avg       1.00      1.00      1.00      1000
weighted avg       1.00      1.00      1.00      1000
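The SVM report makes the class skew visible: only 14 of the 1,000 test rows are jammed, which is why class-1 precision drops to 0.64 despite 99% overall accuracy. A stratified split, as already used in the earlier preprocessing cell, would at least keep that ratio stable across train and test; a one-line variant:

# Stratify on the label so the ~1.4% jammed class is split proportionally.
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42, stratify=y)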

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

### --- K-Means Clustering for Unsupervised Jamming Detection ---
kmeans = KMeans(n_clusters=2, random_state=42)
kmeans_labels = kmeans.fit_predict(X_scaled)

# Evaluate clustering using the silhouette score
silhouette_avg = silhouette_score(X_scaled, kmeans_labels)
print("\nSilhouette Score for K-Means Clustering:", silhouette_avg)


Silhouette Score for K-Means Clustering: 0.14700095495209367
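A silhouette score of roughly 0.15 indicates weakly separated clusters, so the two K-Means clusters should not be read as a clean normal/jammed split. One illustrative follow-up, not in the original run, is to map each cluster to its majority true label and measure the agreement:

# Map each K-Means cluster to its majority 'jammed' label, then score agreement.
ct = pd.crosstab(kmeans_labels, y)
cluster_to_label = ct.idxmax(axis=1)
mapped = pd.Series(kmeans_labels).map(cluster_to_label)
print("Cluster/label agreement:", accuracy_score(y, mapped))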
