AJAY KUMAR GARG ENGINEERING COLLEGE,
GHAZIABAD
DEPARTMENT OF COMPUTER SCIENCE & ENGINEERING
DEEP LEARNING LAB (BAI751)
COURSE : B.TECH
BRANCH : AIML-1
SECTION : AIML-1
SEMESTER : VII
GROUP :
SUBMITTED BY: SUBMITTED TO:
Name: Ujjwal Kaushik
Roll No.: 2200271640057
Session: 2025-2026 (Odd Sem.)
INDEX
DEEP LEARNING Lab (BAI751)
Name:-
Roll Number:-
S.No. Experiment Name Date Faculty Signature
1. Write a Python program to implement a Perceptron for understanding a single-layer neural network.
2. Write a program to visualize activation functions (Sigmoid, ReLU, Tanh).
3. Write a program to build a simple feedforward neural network.
4. Write a program for MNIST digit classification using Keras.
5. Write a program to create and visualize CNN layers.
6. Write a program for CIFAR-10 image classification to understand multiclass image classification using a CNN.
7. Write a program to implement image augmentation techniques as an application of preprocessing & transformation.
8. Write a program using Dropout for regularisation to improve model generalisation.
9. Write a program to build a model using PyTorch.
10. Write a program to compare training with and without Batch Normalization, analyzing performance and convergence.
Student’s Signature Faculty Signature
Program :- 1
Write a Python program to implement a Perceptron for understanding a single-layer neural network.
Code:-
import numpy as np

class Perceptron:
    def __init__(self, n_features, lr=0.1, epochs=10):
        self.w = np.zeros(n_features + 1)  # +1 for the bias weight
        self.lr = lr
        self.epochs = epochs

    def predict_single(self, x):
        x_b = np.insert(x, 0, 1.0)  # prepend constant bias input
        return 1 if np.dot(self.w, x_b) >= 0 else 0

    def fit(self, X, y):
        for epoch in range(self.epochs):
            errors = 0
            for xi, yi in zip(X, y):
                xi_b = np.insert(xi, 0, 1.0)
                pred = 1 if np.dot(self.w, xi_b) >= 0 else 0
                update = self.lr * (yi - pred)  # perceptron learning rule
                if update != 0:
                    self.w += update * xi_b
                    errors += 1
            print(f"Epoch {epoch+1}/{self.epochs} - errors: {errors}")

    def predict(self, X):
        return np.array([self.predict_single(x) for x in X])

# Demo: AND gate
X = np.array([[0,0],[0,1],[1,0],[1,1]])
y_and = np.array([0,0,0,1])
percep = Perceptron(n_features=2, lr=0.2, epochs=10)
percep.fit(X, y_and)
print('Weights:', percep.w)
print('Predictions:', percep.predict(X))
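A quick optional check (not required for the record): the same class should also learn the OR gate, since OR, like AND, is linearly separable; XOR is not, and a single-layer perceptron can never converge on it.
# Optional: the OR gate is also linearly separable
y_or = np.array([0,1,1,1])
percep_or = Perceptron(n_features=2, lr=0.2, epochs=10)
percep_or.fit(X, y_or)
print('OR predictions:', percep_or.predict(X))  # should settle at [0 1 1 1]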
Output :-
Program :- 2
Write a program to visualize activation functions (Sigmoid, ReLU, Tanh).
Code:-
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x): return 1/(1+np.exp(-x))
def relu(x): return np.maximum(0, x)
def tanh(x): return np.tanh(x)
x = np.linspace(-6, 6, 400)
plt.figure(figsize=(8,4))
plt.plot(x, sigmoid(x), label='Sigmoid')
plt.plot(x, relu(x), label='ReLU')
plt.plot(x, tanh(x), label='Tanh')
plt.legend(); plt.grid(True)
plt.title('Activation functions')
plt.show()
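Optionally, the derivatives of these functions can be plotted the same way, since they are what backpropagation multiplies by: sigmoid'(x) = s(x)(1 - s(x)) where s is the sigmoid, tanh'(x) = 1 - tanh^2(x), and ReLU'(x) is 1 for x > 0 and 0 otherwise. A short sketch reusing the definitions above:
# Optional: plot the derivatives (reuses sigmoid, x, np, plt from above)
plt.figure(figsize=(8,4))
plt.plot(x, sigmoid(x)*(1-sigmoid(x)), label="Sigmoid'")
plt.plot(x, (x > 0).astype(float), label="ReLU'")
plt.plot(x, 1 - np.tanh(x)**2, label="Tanh'")
plt.legend(); plt.grid(True)
plt.title('Activation function derivatives')
plt.show()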
Output :-
Program :- 3
Write a program to build a simple feedforward neural network.
Code:-
import numpy as np
def sigmoid(x): return 1/(1+np.exp(-x))
X = np.array([[0,0],[0,1],[1,0],[1,1]])
y = np.array([[0],[1],[1],[0]])
np.random.seed(1)
W1 = 2*np.random.rand(2,2)-1
b1 = np.zeros((1,2))
W2 = 2*np.random.rand(2,1)-1
b2 = np.zeros((1,1))
lr = 0.5
for epoch in range(8000):
    # forward pass
    z1 = X.dot(W1) + b1
    a1 = sigmoid(z1)
    z2 = a1.dot(W2) + b2
    a2 = sigmoid(z2)
    # backward pass: gradients of the MSE loss through the sigmoid units
    d2 = (a2 - y) * a2 * (1 - a2)
    dW2 = a1.T.dot(d2)
    db2 = d2.sum(axis=0, keepdims=True)
    d1 = d2.dot(W2.T) * a1 * (1 - a1)
    dW1 = X.T.dot(d1)
    db1 = d1.sum(axis=0, keepdims=True)
    # gradient-descent updates
    W2 -= lr * dW2; b2 -= lr * db2
    W1 -= lr * dW1; b1 -= lr * db1
    if epoch % 2000 == 0:
        loss = np.mean((y - a2)**2)
        print(f'Epoch {epoch}, loss {loss:.6f}')
print('Final outputs (rounded):', np.round(a2,3))
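Optionally, the sigmoid outputs can be thresholded at 0.5 to get hard 0/1 predictions; on a successful run they match the XOR targets (with only two hidden units, an unlucky initialisation can occasionally stall in a local minimum):
# Optional: threshold the outputs and compare against the XOR targets
preds = (a2 > 0.5).astype(int)
print('Hard predictions:', preds.ravel(), '| targets:', y.ravel())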
Output :-
Program :- 4
Write a program for MNIST digit classification using Keras.
Code:-
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# use small subsets so training finishes quickly
x_train = x_train[:8000].astype('float32')/255.0
y_train = y_train[:8000]
x_test = x_test[:2000].astype('float32')/255.0
y_test = y_test[:2000]
x_train = np.expand_dims(x_train, -1)  # add channel dim: (N,28,28,1)
x_test = np.expand_dims(x_test, -1)
model = models.Sequential([
    layers.Conv2D(16, 3, activation='relu', input_shape=(28,28,1)),
    layers.MaxPooling2D(2),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train, epochs=3, batch_size=64,
                    validation_split=0.1)
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy:', acc)
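Optionally, a single prediction can be inspected; predict returns a probability distribution over the ten digits, and argmax selects the most likely class:
# Optional: inspect one prediction (uses x_test/y_test from above)
probs = model.predict(x_test[:1])
print('Predicted digit:', np.argmax(probs), '| true label:', y_test[0])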
Output :-
Program :- 5
Write a program to create and visualize CNN layers.
Code:-
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models
# ---- 1. Define model using Functional API ----
inputs = tf.keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(8, 3, activation='relu', name='conv1')(inputs)
x = layers.MaxPooling2D(2, name='pool1')(x)
x = layers.Conv2D(16, 3, activation='relu', name='conv2')(x)
x = layers.MaxPooling2D(2, name='pool2')(x)
model = tf.keras.Model(inputs=inputs, outputs=x, name="cnn_feature_extractor")
model.summary()
# ---- 2. Load one MNIST image ----
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
img = x_train[0].astype('float32') / 255.0
img = np.expand_dims(img, axis=(0, -1)) # shape (1,28,28,1)
# ---- 3. Feature extractor for conv layers ----
feat_model = tf.keras.Model(
    inputs=inputs,
    outputs=[layer.output for layer in model.layers if 'conv' in layer.name]
)
# ---- 4. Get activations ----
acts = feat_model.predict(img)
# ---- 5. Plot feature maps ----
for li, act in enumerate(acts):
    n_filters = act.shape[-1]
    cols = min(n_filters, 8)  # show up to 8 maps per row
    rows = int(np.ceil(n_filters / cols))
    plt.figure(figsize=(cols*2, rows*2))
    for i in range(n_filters):
        plt.subplot(rows, cols, i+1)
        plt.imshow(act[0, :, :, i], cmap='viridis')
        plt.axis('off')
    plt.suptitle(f'Feature maps from Conv Layer {li+1}')
    plt.show()
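Note that the model above is never trained, so the feature maps come from randomly initialised filters. The raw kernels themselves can also be visualised; a short sketch using the conv1 layer defined above:
# Optional: visualise the raw conv1 kernels (random here, since the model is untrained)
w = model.get_layer('conv1').get_weights()[0]  # kernel shape (3, 3, 1, 8)
plt.figure(figsize=(8, 2))
for i in range(w.shape[-1]):
    plt.subplot(1, w.shape[-1], i+1)
    plt.imshow(w[:, :, 0, i], cmap='gray')
    plt.axis('off')
plt.suptitle('conv1 kernels')
plt.show()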
Output :-
Program :- 6
Write a program for CIFAR-10 image classification to understand multiclass image classification using a CNN.
Code:-
import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train[:8000].astype('float32')/255.0
y_train = y_train[:8000]
x_test = x_test[:2000].astype('float32')/255.0
y_test = y_test[:2000]
model = models.Sequential([
    layers.Conv2D(32, 3, activation='relu', input_shape=(32,32,3)),
    layers.MaxPooling2D(2),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train, epochs=4, batch_size=64,
                    validation_split=0.1)
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy (subset):', acc)
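Optionally, predicted class indices can be mapped to the standard CIFAR-10 class names for readability:
# Optional: map predicted indices to CIFAR-10 class names
class_names = ['airplane','automobile','bird','cat','deer',
               'dog','frog','horse','ship','truck']
preds = model.predict(x_test[:5]).argmax(axis=1)
for p, t in zip(preds, y_test[:5].ravel()):
    print('predicted:', class_names[p], '| true:', class_names[t])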
Output :-
Program :- 7
Write a program to implement image augmentation techniques as an application of preprocessing & transformation.
Code:-
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.datasets import mnist
import numpy as np, matplotlib.pyplot as plt
(x, _), _ = mnist.load_data()
img = x[1].astype('float32')/255.0
img = np.stack([img]*3, axis=-1)  # grayscale -> 3 channels for RGB display
img = np.expand_dims(img, 0)      # add batch dimension: (1,28,28,3)
datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1,
                             height_shift_range=0.1, shear_range=0.1,
                             zoom_range=0.2, horizontal_flip=True,
                             fill_mode='nearest')
it = datagen.flow(img, batch_size=1)
plt.figure(figsize=(12,3))
for i in range(8):
    batch = next(it)[0]  # one randomly augmented image per draw
    plt.subplot(1, 8, i+1)
    plt.imshow(batch)
    plt.axis('off')
plt.suptitle('Augmented samples')
plt.show()
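ImageDataGenerator still works but is deprecated in recent TensorFlow releases; a sketch of the modern equivalent (assuming TF 2.9+), using Keras preprocessing layers that can also run inside the model itself:
# Optional modern alternative: Keras preprocessing layers
import tensorflow as tf
augment = tf.keras.Sequential([
    tf.keras.layers.RandomRotation(0.06),  # ~ +/-20 degrees, as a fraction of 2*pi
    tf.keras.layers.RandomTranslation(0.1, 0.1),
    tf.keras.layers.RandomZoom(0.2),
    tf.keras.layers.RandomFlip('horizontal'),
])
aug_img = augment(img, training=True)  # img from above, shape (1,28,28,3)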
Output :-
Program :- 8
Write a program using Dropout for regularisation to improve model generalisation.
Code:-
import tensorflow as tf, numpy as np
from tensorflow.keras import layers, models
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train[:8000].astype('float32')/255.0; y_train = y_train[:8000]
x_test = x_test[:2000].astype('float32')/255.0; y_test = y_test[:2000]
x_train = np.expand_dims(x_train, -1); x_test = np.expand_dims(x_test, -1)
def build(use_dropout=False):
    m = models.Sequential()
    m.add(layers.Conv2D(16, 3, activation='relu', input_shape=(28,28,1)))
    m.add(layers.MaxPooling2D(2))
    m.add(layers.Flatten())
    m.add(layers.Dense(64, activation='relu'))
    if use_dropout:
        m.add(layers.Dropout(0.5))  # drops half the units, during training only
    m.add(layers.Dense(10, activation='softmax'))
    m.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
    return m

m_no = build(False); m_do = build(True)
h_no = m_no.fit(x_train, y_train, epochs=3, batch_size=64,
                validation_split=0.1, verbose=1)
h_do = m_do.fit(x_train, y_train, epochs=3, batch_size=64,
                validation_split=0.1, verbose=1)
print('NoDropout val_acc:', h_no.history['val_accuracy'][-1])
print('WithDropout val_acc:', h_do.history['val_accuracy'][-1])
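Optionally, the two validation curves can be plotted side by side. With only three epochs on a small subset the gap may be modest; dropout tends to help more on longer runs and smaller datasets:
# Optional: compare the validation accuracy curves
import matplotlib.pyplot as plt
plt.plot(h_no.history['val_accuracy'], label='No Dropout')
plt.plot(h_do.history['val_accuracy'], label='With Dropout')
plt.xlabel('Epoch'); plt.ylabel('Validation accuracy')
plt.legend(); plt.show()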
Output :-
Program :- 9
Write a program to build a model using PyTorch.
Code:-
import torch, torch.nn as nn, torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Subset
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
train_full = datasets.MNIST('.', train=True, download=True, transform=transform)
train_subset = Subset(train_full, range(0, 3000))  # small subset for speed
train_loader = DataLoader(train_subset, batch_size=64, shuffle=True)
test = datasets.MNIST('.', train=False, download=True, transform=transform)
test_loader = DataLoader(Subset(test, range(0, 1000)), batch_size=128)

class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 128),
                                 nn.ReLU(), nn.Linear(128, 10))
    def forward(self, x):
        return self.net(x)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SimpleNN().to(device)
criterion = nn.CrossEntropyLoss()
opt = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(2):
    model.train()
    running = 0.0
    for xb, yb in train_loader:
        xb, yb = xb.to(device), yb.to(device)
        opt.zero_grad()
        out = model(xb)
        loss = criterion(out, yb)
        loss.backward()
        opt.step()
        running += loss.item() * xb.size(0)
    print(f'Epoch {epoch+1}, loss {running/len(train_loader.dataset):.4f}')

model.eval()
correct = 0; total = 0
with torch.no_grad():
    for xb, yb in test_loader:
        xb, yb = xb.to(device), yb.to(device)
        pred = model(xb).argmax(dim=1)
        correct += (pred == yb).sum().item()
        total += yb.size(0)
print('Test accuracy (subset):', correct/total)
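Optionally, the trained weights can be saved and restored; the filename here is arbitrary:
# Optional: save and reload the trained weights
torch.save(model.state_dict(), 'simple_nn.pt')
model2 = SimpleNN().to(device)
model2.load_state_dict(torch.load('simple_nn.pt'))
model2.eval()  # remember to switch to eval mode before inference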
Output :-
Program :- 10
Write a program to compare training with and without Batch Normalization, analyzing performance and convergence.
Code:-
import tensorflow as tf, numpy as np
from tensorflow.keras import layers, models
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train[:8000].astype('float32')/255.0; y_train = y_train[:8000]
x_test = x_test[:2000].astype('float32')/255.0; y_test = y_test[:2000]
x_train = np.expand_dims(x_train, -1); x_test = np.expand_dims(x_test, -1)
def build(use_bn=False):
    inp = layers.Input(shape=(28,28,1))
    x = layers.Conv2D(16, 3, padding='same')(inp)
    if use_bn: x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)  # activation applied after (optional) BN
    x = layers.MaxPooling2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(64)(x)
    if use_bn: x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    out = layers.Dense(10, activation='softmax')(x)
    m = models.Model(inp, out)
    m.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
    return m

m_no = build(False); m_bn = build(True)
h_no = m_no.fit(x_train, y_train, epochs=4, batch_size=64,
                validation_split=0.1, verbose=1)
h_bn = m_bn.fit(x_train, y_train, epochs=4, batch_size=64,
                validation_split=0.1, verbose=1)
for i in range(4):
    print(f'Epoch {i+1}: NoBN val_acc={h_no.history["val_accuracy"][i]:.4f} '
          f'| BN val_acc={h_bn.history["val_accuracy"][i]:.4f}')
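Optionally, the same comparison can be plotted. Batch Normalization typically speeds up early convergence, though on this small subset the difference may be small:
# Optional: plot the convergence comparison
import matplotlib.pyplot as plt
plt.plot(h_no.history['val_accuracy'], label='Without BN')
plt.plot(h_bn.history['val_accuracy'], label='With BN')
plt.xlabel('Epoch'); plt.ylabel('Validation accuracy')
plt.legend(); plt.title('Effect of Batch Normalization')
plt.show()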
Output :-