EXERCISE-1
# Exercise 1: Build a Convolutional Neural Network (CNN) for Image Recognition
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
# Load dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# CNN Model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
# Compile Model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train Model
model.fit(x_train, y_train, epochs=10, batch_size=64, validation_data=(x_test, y_test))
# Evaluate Model
loss, acc = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {acc:.4f}")
# Predictions
predictions = model.predict(x_test[:5])
for i in range(5):
    plt.figure(figsize=(4, 4), dpi=300)
    plt.imshow(x_test[i])
    plt.axis("off")
    plt.title(f"Predicted: {np.argmax(predictions[i])}, Actual: {np.argmax(y_test[i])}")
    plt.show()
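The printed labels are class indices; the standard CIFAR-10 class order (the lookup list below is an addition for readability) maps them to names:
cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
print([cifar10_classes[np.argmax(p)] for p in predictions])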
# Additional Exercises: Will be corrected and appended...
Output:
EXERCISE-3:
Code: Design a CNN for Image Recognition which includes hyperparameter tuning.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import keras_tuner as kt
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Conv2D(filters=hp.Int('filters', min_value=32, max_value=128, step=32),
                            kernel_size=hp.Choice('kernel_size', values=[3, 5]),
                            activation='relu',
                            input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Conv2D(filters=hp.Int('filters_2', min_value=32, max_value=128, step=32),
                            kernel_size=hp.Choice('kernel_size_2', values=[3, 5]),
                            activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=hp.Int('units', min_value=32, max_value=128, step=32),
                           activation='relu'))
    model.add(layers.Dropout(rate=hp.Float('dropout', min_value=0.1, max_value=0.5, step=0.1)))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(optimizer=keras.optimizers.Adam(
                      learning_rate=hp.Float('learning_rate', min_value=1e-4, max_value=1e-2, sampling='log')),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Load dataset (example: MNIST dataset)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
# Hyperparameter tuning
tuner = kt.Hyperband(build_model,
                     objective='val_accuracy',
                     max_epochs=10,
                     factor=3,
                     directory='hyperparam_tuning',
                     project_name='cnn_tuning')
def early_stopping_callback():
    return keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)

tuner.search(x_train, y_train, validation_split=0.2, epochs=10,
             callbacks=[early_stopping_callback()])
# Get the best model
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
model = tuner.hypermodel.build(best_hps)
# Train the best model
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10,
          callbacks=[early_stopping_callback()])
# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {accuracy:.4f}")
Output:
Epoch 2/2
1500/1500 ━━━━━━━━━━━━━━━━━━━━ 17s 127ms/step - accuracy: 0.99995 - loss: 0.5478
EXERCISE-4:
Code: Implement a Recurrent Neural Network for Predicting Sequential Data
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Generate sample sequential data (Sine Wave)
timesteps = np.linspace(0, 100, 1000)
data = np.sin(timesteps)
# Function to create dataset for RNN
def create_dataset(data, time_step=20):
    X, y = [], []
    for i in range(len(data) - time_step):
        X.append(data[i:i + time_step])
        y.append(data[i + time_step])
    return np.array(X), np.array(y)
time_step = 20 # Number of past timesteps used for prediction
X, y = create_dataset(data, time_step)
# Reshape data to fit RNN input (samples, time steps, features)
X = X.reshape((X.shape[0], X.shape[1], 1))
# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
shuffle=False)
# Build RNN Model
model = Sequential([
    SimpleRNN(units=50, activation='relu', input_shape=(time_step, 1)),
    Dense(units=1)
])
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=1)
# Evaluate the model
loss = model.evaluate(X_test, y_test)
print(f"Test Loss: {loss}")
# Make predictions
predictions = model.predict(X_test)
# Visualize results
plt.plot(y_test, label='Actual Data')
plt.plot(predictions, label='Predicted Data')
plt.legend()
plt.title("RNN Predictions vs Actual Data")
plt.show()
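Because the network predicts one step at a time, a longer horizon needs predictions fed back in. Below is a minimal autoregressive rollout sketch (window, future, and nxt are new names introduced here, not part of the original exercise):
window = X_test[-1]                 # last known window, shape (time_step, 1)
future = []
for _ in range(50):
    nxt = model.predict(window[np.newaxis], verbose=0)[0, 0]
    future.append(nxt)
    window = np.vstack([window[1:], [[nxt]]])  # slide the window forward
plt.plot(future, label='Autoregressive forecast')
plt.legend()
plt.show()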
Output:
/usr/local/lib/python3.11/dist-packages/keras/src/layers/rnn/[Link]:
UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer.
When using Sequential models, prefer using an `Input(shape)` object as the
first layer in the model instead.
super().__init__(**kwargs)
Epoch 1/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 5s 9ms/step - loss: 0.3625
Epoch 2/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - loss: 0.0150
Epoch 3/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 15ms/step - loss: 0.0016
Epoch 4/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - loss: 3.2351e-04
Epoch 5/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - loss: 1.8168e-04
Epoch 6/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 1.0661e-04
Epoch 7/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 6.9849e-05
Epoch 8/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 4.6706e-05
Epoch 9/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 4.3910e-05
Epoch 10/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 3.5222e-05
Epoch 11/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.4322e-05
Epoch 12/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.9496e-05
Epoch 13/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.5485e-05
Epoch 14/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.5900e-05
Epoch 15/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.2644e-05
Epoch 16/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.3633e-05
Epoch 17/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.3374e-05
Epoch 18/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 8.4114e-06
Epoch 19/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1619e-05
Epoch 20/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.3083e-05
Epoch 21/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.9959e-06
Epoch 22/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 7.2074e-06
Epoch 23/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.8178e-06
Epoch 24/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 5.3313e-06
Epoch 25/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.8431e-06
Epoch 26/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 4.3399e-06
Epoch 27/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.3617e-06
Epoch 28/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.7868e-06
Epoch 29/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.7098e-06
Epoch 30/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.0948e-06
Epoch 31/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.1495e-06
Epoch 32/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 3.6084e-06
Epoch 33/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 3.8976e-06
Epoch 34/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.4221e-06
Epoch 35/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.2972e-06
Epoch 36/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.5364e-06
Epoch 37/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 2.8882e-06
Epoch 38/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.9125e-06
Epoch 39/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 3.7844e-06
Epoch 40/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.1490e-06
Epoch 41/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 2.9625e-06
Epoch 42/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 1.8659e-06
Epoch 43/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 1.9545e-06
Epoch 44/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - loss: 2.4747e-06
Epoch 45/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - loss: 2.5827e-06
Epoch 46/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.1454e-06
Epoch 47/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.0774e-06
Epoch 48/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.2902e-06
Epoch 49/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.9725e-06
Epoch 50/50
25/25 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.2797e-06
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.3730e-06
Test Loss: 1.3749261142947944e-06
7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step
EXERCISE-5:
Code: Implement a Multi-Layer Perceptron for Image Denoising with Hyperparameter Tuning
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import keras_tuner as kt
import matplotlib.pyplot as plt
# Load the MNIST dataset
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # Normalize
# Add noise to the images
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(size=x_test.shape)
# Clip the values to stay in [0, 1]
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
# Flatten images for MLP
x_train_noisy = x_train_noisy.reshape(-1, 28*28)
x_test_noisy = x_test_noisy.reshape(-1, 28*28)
x_train = x_train.reshape(-1, 28*28)
x_test = x_test.reshape(-1, 28*28)
# Define the MLP model with hyperparameter tuning
def build_model(hp):
    model = keras.Sequential()
    model.add(keras.Input(shape=(28*28,)))
    for i in range(hp.Int('num_layers', 1, 3)):
        model.add(layers.Dense(hp.Int(f'units_{i}', 64, 256, step=64),
                               activation='relu'))
    model.add(layers.Dense(28*28, activation='sigmoid'))
    model.compile(optimizer=keras.optimizers.Adam(
                      hp.Choice('learning_rate', [0.001, 0.0005, 0.0001])),
                  loss='mse')
    return model
# Use Keras Tuner for hyperparameter tuning
tuner = kt.RandomSearch(
    build_model,
    objective='val_loss',
    max_trials=5,
    executions_per_trial=1,
    directory='mlp_denoising_tuning',
    project_name='image_denoising'
)
# Perform hyperparameter tuning
tuner.search(x_train_noisy, x_train, epochs=10,
             validation_data=(x_test_noisy, x_test), verbose=1)
# Get the best model
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hps)
# Train the best model
best_model.fit(x_train_noisy, x_train, epochs=20,
validation_data=(x_test_noisy, x_test))
# Denoise images
x_test_denoised = best_model.predict(x_test_noisy).reshape(-1, 28, 28)
# Plot original, noisy, and denoised images
n = 5
plt.figure(figsize=(10, 5))
for i in range(n):
    plt.subplot(3, n, i+1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
    plt.subplot(3, n, i+n+1)
    plt.imshow(x_test_noisy[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
    plt.subplot(3, n, i+2*n+1)
    plt.imshow(x_test_denoised[i], cmap='gray')
    plt.axis('off')
plt.show()
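A single summary number is useful alongside the plots; this added check (not in the original) reports the mean squared reconstruction error on the test set:
mse = np.mean((x_test.reshape(-1, 28, 28) - x_test_denoised) ** 2)
print(f"Test reconstruction MSE: {mse:.4f}")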
OUTPUT:
Trial 1 Complete [00h 02m 48s]
val_loss: 0.017095204442739487
Best val_loss So Far: 0.017095204442739487
Total elapsed time: 00h 02m 48s
Search: Running Trial #2
Value     |Best Value So Far |Hyperparameter
1         |3                 |num_layers
128       |256               |units_0
0.0001    |0.0005            |learning_rate
192       |64                |units_1
64        |64                |units_2
Epoch 1/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step - loss: 0.0843 - val_loss: 0.0387
Epoch 2/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 22s 7ms/step - loss: 0.0360 - val_loss: 0.0291
Epoch 3/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step - loss: 0.0278 - val_loss: 0.0242
Epoch 4/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 20s 6ms/step - loss: 0.0238 - val_loss: 0.0216
Epoch 5/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 20s 6ms/step - loss: 0.0215 - val_loss: 0.0200
Epoch 6/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 22s 6ms/step - loss: 0.0199 - val_loss: 0.0190
Epoch 7/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step - loss: 0.0189 - val_loss: 0.0182
Epoch 8/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 12s 6ms/step - loss: 0.0181 - val_loss: 0.0177
Epoch 9/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 21s 6ms/step - loss: 0.0176 - val_loss: 0.0172
Epoch 10/10
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 19s 6ms/step - loss: 0.0171 - val_loss: 0.0169
EXERCISE-6:
Code: Implement Object Detection Using YOLO.
# Install Ultralytics YOLOv8 (if not installed)
!pip install ultralytics
# Import required libraries
import cv2
import os
from google.colab import files
from google.colab.patches import cv2_imshow
from ultralytics import YOLO
# Upload an image manually
uploaded = files.upload()
# Get the uploaded filename dynamically
image_path = list(uploaded.keys())[0]
# Check if the file exists
if not os.path.exists(image_path):
    print(f"Error: File '{image_path}' not found!")
else:
    image = cv2.imread(image_path)
    if image is None:
        print(f"Error: Could not read '{image_path}'! Check the file format.")
    else:
        # Load YOLO model
        model = YOLO("yolov8n.pt")  # Using YOLOv8 nano model
        # Run YOLO object detection
        results = model(image)
        # Draw bounding boxes
        for result in results:
            for box in result.boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding box coordinates
                conf = box.conf[0].item()  # Confidence score
                cls = int(box.cls[0].item())  # Class ID
                label = f"{model.names[cls]}: {conf:.2f}"
                # Draw rectangle and label
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0), 2)
        # Show image with detections
        cv2_imshow(image)
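As an alternative to the manual drawing loop, ultralytics can render its own annotations: results[0].plot() returns a BGR array with boxes and labels already drawn, so the display step could also be written as the two lines below (an added aside, not part of the original listing):
annotated = results[0].plot()  # ultralytics draws boxes and labels itself
cv2_imshow(annotated)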
Output:
(annotated detection images from the first and second executions)
EXERCISE-7:
Code: Design a Deep Learning Network for Robust Bi-Tempered Logistic Loss
from tensorflow.keras import layers, models, Input
# Define Model with Explicit Input Layer
def build_cnn(input_shape, num_classes):
    model = models.Sequential([
        Input(shape=input_shape),  # Explicit input layer
        layers.Conv2D(32, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(128, (3, 3), activation='relu'),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(num_classes)  # No activation (logits for the custom loss)
    ])
    return model
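The compile call below references bi_tempered_logistic_loss, which the listing never defines. Here is a minimal sketch following Amid et al. (2019), assuming one-hot labels and raw logits (log_t, exp_t, and tempered_softmax are helper names introduced here, not part of the original):
import tensorflow as tf

def log_t(u, t):
    # Tempered logarithm; recovers log(u) as t -> 1
    return (u ** (1.0 - t) - 1.0) / (1.0 - t)

def exp_t(u, t):
    # Tempered exponential; recovers exp(u) as t -> 1
    return tf.nn.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))

def tempered_softmax(activations, t, num_iters=5):
    # Fixed-point iteration for the normalization constant (valid for t > 1)
    mu = tf.reduce_max(activations, axis=-1, keepdims=True)
    a0 = activations - mu
    a = a0
    for _ in range(num_iters):
        z = tf.reduce_sum(exp_t(a, t), axis=-1, keepdims=True)
        a = a0 * z ** (1.0 - t)
    z = tf.reduce_sum(exp_t(a, t), axis=-1, keepdims=True)
    normalization = -log_t(1.0 / z, t) + mu
    return exp_t(activations - normalization, t)

def bi_tempered_logistic_loss(y_true, y_pred, t1, t2):
    # t1 < 1 bounds the loss; t2 > 1 gives a heavy-tailed softmax
    y_true = tf.cast(y_true, y_pred.dtype)
    probs = tempered_softmax(y_pred, t2)
    loss = (y_true * (log_t(y_true + 1e-10, t1) - log_t(probs, t1))
            - (y_true ** (2.0 - t1) - probs ** (2.0 - t1)) / (2.0 - t1))
    return tf.reduce_sum(loss, axis=-1)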
# Parameters
input_shape = (32, 32, 3) # Example: CIFAR-10 dataset
num_classes = 10
# Build and compile the model
model = build_cnn(input_shape, num_classes)
model.compile(optimizer='adam',
              loss=lambda y_true, y_pred: bi_tempered_logistic_loss(y_true, y_pred, t1=0.8, t2=1.2),
              metrics=['accuracy'])
# Display model summary
model.summary()
Output:
EXERCISE-8:
Code: Build AlexNet Using Advanced CNN
import tensorflow as tf
from tensorflow.keras import layers, models, Input
def build_alexnet(input_shape=(227, 227, 3), num_classes=1000):
    model = models.Sequential([
        Input(shape=input_shape),
        # 1st Convolutional Layer
        layers.Conv2D(96, (11, 11), strides=4, activation='relu', padding='valid'),
        layers.BatchNormalization(),
        layers.MaxPooling2D((3, 3), strides=2),
        # 2nd Convolutional Layer
        layers.Conv2D(256, (5, 5), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.MaxPooling2D((3, 3), strides=2),
        # 3rd Convolutional Layer
        layers.Conv2D(384, (3, 3), activation='relu', padding='same'),
        # 4th Convolutional Layer
        layers.Conv2D(384, (3, 3), activation='relu', padding='same'),
        # 5th Convolutional Layer
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D((3, 3), strides=2),
        # Flatten and Fully Connected Layers
        layers.Flatten(),
        layers.Dense(4096, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(4096, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation='softmax')  # Output layer
    ])
    return model
# Build and Compile Model
alexnet = build_alexnet(input_shape=(227, 227, 3), num_classes=1000)
alexnet.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=['accuracy'])
# Display Model Summary
alexnet.summary()
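A quick shape check (an added sanity test, not part of the original) confirms that a 227x227 input yields a 1000-way output:
import numpy as np
dummy = np.zeros((1, 227, 227, 3), dtype="float32")
print(alexnet(dummy).shape)  # expected: (1, 1000)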
OUTPUT:
# A fully connected (MLP) variant trained directly on CIFAR-10 follows.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, Flatten, Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10
# Load dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize pixel values (0 to 1)
x_train, x_test = x_train / 255.0, x_test / 255.0
# One-hot encode labels
y_train_encoded = to_categorical(y_train, num_classes=10)
y_test_encoded = to_categorical(y_test, num_classes=10)
# Define the model
model = Sequential()
model.add(Flatten(input_shape=(32, 32, 3)))  # CIFAR-10 images are 32x32x3
# Dense Layers
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.4))
model.add(BatchNormalization())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.4))
model.add(BatchNormalization())
# Output Layer
model.add(Dense(10, activation='softmax'))  # 10 classes in CIFAR-10
# Compile the model
model.compile(optimizer=Adam(), loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train_encoded, epochs=10, batch_size=64,
          shuffle=True, validation_split=0.1)
# Evaluate the model
eval_results = model.evaluate(x_test, y_test_encoded)
print(f"Evaluation Loss: {eval_results[0]}, Accuracy: {eval_results[1]}")
# Make predictions
predictions = model.predict(x_test[:100])
predicted_class = np.argmax(predictions[0])
# Display first test image and its prediction
plt.imshow(x_test[0])
plt.title(f"Predicted Class: {predicted_class}")
plt.show()
Output:
Epoch 1/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 300s 423ms/step - accuracy: 0.2408 - loss: 2.7379 - val_accuracy: 0.2472 - val_loss: 2.2091
Epoch 2/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 294s 418ms/step - accuracy: 0.2989 - loss: 2.0863 - val_accuracy: 0.3036 - val_loss: 2.0798
Epoch 3/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 323s 419ms/step - accuracy: 0.3067 - loss: 1.9623 - val_accuracy: 0.3048 - val_loss: 2.3234
Epoch 4/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 329s 430ms/step - accuracy: 0.3201 - loss: 1.9092 - val_accuracy: 0.3684 - val_loss: 2.6944
Epoch 5/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 319s 426ms/step - accuracy: 0.3229 - loss: 1.8966 - val_accuracy: 0.3454 - val_loss: 2.9305
Epoch 6/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 323s 427ms/step - accuracy: 0.3256 - loss: 1.8717 - val_accuracy: 0.3846 - val_loss: 3.0424
Epoch 7/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 298s 424ms/step - accuracy: 0.3290 - loss: 1.8508 - val_accuracy: 0.3576 - val_loss: 2.8947
Epoch 8/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 325s 429ms/step - accuracy: 0.3315 - loss: 1.8492 - val_accuracy: 0.4048 - val_loss: 3.8287
Epoch 9/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 302s 430ms/step - accuracy: 0.3436 - loss: 1.8211 - val_accuracy: 0.2302 - val_loss: 12.9044
Epoch 10/10
704/704 ━━━━━━━━━━━━━━━━━━━━ 303s 431ms/step - accuracy: 0.3450 - loss: 1.8126 - val_accuracy: 0.3766 - val_loss: 3.2894
313/313 ━━━━━━━━━━━━━━━━━━━━ 11s 34ms/step - accuracy: 0.3874 - loss: 1.9723
Evaluation Loss: 2.0299267768859863, Accuracy: 0.3774000108242035
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 48ms/step
EXERCISE-9:
Code: Demonstration of an Application of Autoencoders.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten, Reshape
from tensorflow.keras.datasets import mnist
# Load dataset
(x_train, _), (x_test, _) = mnist.load_data()
# Normalize images (0 to 1)
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add random noise
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
# Define Autoencoder Model
input_img = Input(shape=(28, 28))
encoded = Flatten()(input_img)
encoded = Dense(64, activation='relu')(encoded)
decoded = Dense(28 * 28, activation='sigmoid')(encoded)
decoded = Reshape((28, 28))(decoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Train Autoencoder
autoencoder.fit(x_train_noisy, x_train, epochs=10, batch_size=256,
                shuffle=True, validation_data=(x_test_noisy, x_test))
# Get Predictions
decoded_imgs = autoencoder.predict(x_test_noisy)
# Display Noisy vs Denoised Images
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # Noisy Image
    plt.subplot(2, n, i + 1)
    plt.imshow(x_test_noisy[i], cmap='gray')
    plt.axis('off')
    # Denoised Image
    plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i], cmap='gray')
    plt.axis('off')
plt.show()
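It can also be instructive to look at the 64-dimensional codes themselves. Reusing the tensors defined above, an encoder-only model (a small addition, not in the original) is:
encoder = Model(input_img, encoded)   # maps a 28x28 image to its 64-d code
codes = encoder.predict(x_test_noisy[:5])
print(codes.shape)                    # (5, 64)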
OUTPUT:
EXERCISE-10:
Code: Demonstration of a GAN
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# Hyperparameters
latent_dim = 100
batch_size = 64
epochs = 10
lr = 0.0002
# Load MNIST Dataset
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
dataset = torchvision.datasets.MNIST(root="./data", train=True,
                                     transform=transform, download=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size, shuffle=True)
# Define Generator
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(latent_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Tanh()  # outputs in [-1, 1], matching the normalized images
        )

    def forward(self, z):
        return self.model(z).view(-1, 1, 28, 28)
# Define Discriminator
class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 128),
            nn.LeakyReLU(0.2),
            nn.Linear(128, 1),
            nn.Sigmoid()  # probability that the input image is real
        )

    def forward(self, img):
        return self.model(img.view(img.size(0), -1))
# Initialize models
generator = Generator()
discriminator = Discriminator()
# Loss and Optimizers
criterion = nn.BCELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=lr)
optimizer_D = optim.Adam(discriminator.parameters(), lr=lr)
# Training Loop
for epoch in range(epochs):
    for real_imgs, _ in dataloader:
        batch_size = real_imgs.size(0)
        # Generate fake images
        z = torch.randn(batch_size, latent_dim)
        fake_imgs = generator(z)
        # Labels
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)
        # Train Discriminator
        optimizer_D.zero_grad()
        real_loss = criterion(discriminator(real_imgs), real_labels)
        fake_loss = criterion(discriminator(fake_imgs.detach()), fake_labels)
        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()
        # Train Generator
        optimizer_G.zero_grad()
        g_loss = criterion(discriminator(fake_imgs), real_labels)
        g_loss.backward()
        optimizer_G.step()
    print(f"Epoch {epoch+1}/{epochs} | D Loss: {d_loss:.4f} | G Loss: {g_loss:.4f}")
# Generate and Display Fake Images
z = torch.randn(16, latent_dim)
fake_imgs = generator(z).detach()
fig, axes = plt.subplots(4, 4, figsize=(5, 5))
for i, ax in enumerate(axes.flatten()):
    ax.imshow(fake_imgs[i].squeeze(), cmap="gray")
    ax.axis("off")
plt.show()
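To keep the samples, torchvision's save_image utility can write the same grid to disk (an optional addition; the filename is arbitrary):
from torchvision.utils import save_image
save_image(fake_imgs, "gan_samples.png", nrow=4, normalize=True)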
OUTPUT: