
AMITY UNIVERSITY UTTAR PRADESH
Amity School of Engineering and Technology
Department of Information Technology

Generative Artificial Intelligence (AIML30)

PRACTICAL FILE

NAME: KESHAV SINGH
ENROLLMENT NO: A023165822006
COURSE: B.Tech CS&BS
SECTION: 7CSBS
SUBMITTED TO: Dr. Sobia Habib


Program 1
AIM: Write a Python program to train a perceptron that implements the NOR logic gate.
Code:
import numpy as np

# NOR Truth Table:
# x1 | x2 | y (NOR)
#  0 |  0 | 1
#  0 |  1 | 0
#  1 |  0 | 0
#  1 |  1 | 0

# Input and output data
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])
y = np.array([[1], [0], [0], [0]])

# Add bias to inputs (x0 = 1)
X_bias = np.hstack((np.ones((X.shape[0], 1)), X))  # Shape: (4, 3)

# Step activation function
def step(x):
    return np.where(x >= 0, 1, 0)

# Initialize weights randomly (3 weights: bias + 2 inputs)
np.random.seed(42)
weights = np.random.randn(3, 1)

# Training settings
lr = 0.1
epochs = 20

for epoch in range(epochs):
    total_error = 0
    for i in range(len(X)):
        x_i = X_bias[i].reshape(1, -1)
        target = y[i]
        z = np.dot(x_i, weights)
        output = step(z)
        error = target - output
        weights += lr * x_i.T * error  # perceptron learning rule
        total_error += abs(error)
    print(f"Epoch {epoch+1}, Total Error: {int(total_error.item())}")

# Final Predictions
print("\nFinal Predictions for NOR Gate:")
for i in range(len(X)):
    x_i = X_bias[i].reshape(1, -1)
    z = np.dot(x_i, weights)
    output = step(z)
    print(f"Input: {X[i]}, Predicted: {output[0][0]}, Actual: {y[i][0]}")
print("Keshav Singh")
Output:
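Note: the weight update inside the training loop is the perceptron learning rule, weights = weights + lr * (target - output) * x. As a quick sanity check (a sketch, not part of the submitted program), one known weight vector that implements NOR is bias = 0.5 with both input weights -1; the snippet below verifies it against the truth table.

import numpy as np

# Hand-picked weights (an assumed known NOR solution, not the trained values):
# bias = 0.5, w1 = w2 = -1.0
w = np.array([0.5, -1.0, -1.0])
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
X_bias = np.hstack((np.ones((4, 1)), X))
preds = (X_bias @ w >= 0).astype(int)  # same step activation as the program
print(preds)  # expected: [1 0 0 0], the NOR column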
Program 2
AIM: Write a Python program to implement backpropagation.
Code:
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score

# Step 1: Load and preprocess the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Convert to binary classification: 1 if Setosa, else 0
y = (y == 0).astype(int).reshape(-1, 1)

# Step 2: Standardize the data
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Step 3: Split into train/test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

# Step 4: Activation function and its derivative
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def sigmoid_derivative(z):
    return sigmoid(z) * (1 - sigmoid(z))

# Step 5: Initialize weights and biases
input_size = X.shape[1]
hidden_size = 4
output_size = 1
np.random.seed(42)
W1 = np.random.randn(input_size, hidden_size)
b1 = np.zeros((1, hidden_size))
W2 = np.random.randn(hidden_size, output_size)
b2 = np.zeros((1, output_size))

# Step 6: Train the model using backpropagation
learning_rate = 0.1
epochs = 100
for epoch in range(epochs):
    # Forward pass
    Z1 = np.dot(X_train, W1) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(A1, W2) + b2
    A2 = sigmoid(Z2)

    # Binary cross-entropy loss
    m = y_train.shape[0]
    loss = -np.mean(y_train * np.log(A2 + 1e-8) + (1 - y_train) * np.log(1 - A2 + 1e-8))

    # Backpropagation
    dZ2 = A2 - y_train
    dW2 = np.dot(A1.T, dZ2) / m
    db2 = np.sum(dZ2, axis=0, keepdims=True) / m
    dA1 = np.dot(dZ2, W2.T)
    dZ1 = dA1 * sigmoid_derivative(Z1)
    dW1 = np.dot(X_train.T, dZ1) / m
    db1 = np.sum(dZ1, axis=0, keepdims=True) / m

    # Update weights and biases
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2

    if epoch % 10 == 0:
        print(f"Epoch {epoch}, Loss: {loss:.4f}")

# Step 7: Predict and evaluate
def predict(X):
    Z1 = np.dot(X, W1) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(A1, W2) + b2
    A2 = sigmoid(Z2)
    return (A2 > 0.5).astype(int)

y_pred = predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("\nTest Accuracy:", accuracy)
print("Keshav Singh")

Output:
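Note: a standard way to validate a backpropagation implementation is a numerical gradient check: perturb one weight by a small epsilon and compare (loss(w+eps) - loss(w-eps)) / (2*eps) with the analytic gradient. Below is a minimal sketch on a toy batch using the same 4-4-1 sigmoid network and loss formulas as above; all names here are illustrative.

import numpy as np

rng = np.random.default_rng(0)
Xc = rng.normal(size=(8, 4))          # toy batch: 8 samples, 4 features
yc = rng.integers(0, 2, size=(8, 1))  # binary targets

W1 = rng.normal(size=(4, 4)); b1 = np.zeros((1, 4))
W2 = rng.normal(size=(4, 1)); b2 = np.zeros((1, 1))

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def loss_fn(W2_try):
    A1 = sigmoid(Xc @ W1 + b1)
    A2 = sigmoid(A1 @ W2_try + b2)
    return -np.mean(yc * np.log(A2 + 1e-8) + (1 - yc) * np.log(1 - A2 + 1e-8))

# Analytic gradient for W2 (same formulas as the training loop above)
A1 = sigmoid(Xc @ W1 + b1)
A2 = sigmoid(A1 @ W2 + b2)
dW2 = A1.T @ (A2 - yc) / Xc.shape[0]

# Finite-difference estimate for one entry of W2
eps = 1e-5
W2p, W2m = W2.copy(), W2.copy()
W2p[0, 0] += eps
W2m[0, 0] -= eps
numeric = (loss_fn(W2p) - loss_fn(W2m)) / (2 * eps)
print(dW2[0, 0], numeric)  # the two values should agree to several decimals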
Program 3

AIM: Write a program to split datasets into training, validation, and testing sets, and
evaluate various ML and neural network models with different hyperparameters for
performance comparison.

Code:
from sklearn.model_selection import train_test_split

# Step 1: Create a small dataset
X = ['sample0', 'sample1', 'sample2', 'sample3', 'sample4',
     'sample5', 'sample6', 'sample7', 'sample8', 'sample9']
y = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]  # labels

# Step 2: First split: train+val vs test (80% train+val, 20% test)
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Step 3: Second split: train vs val (from temp)
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42)
# Note: 0.25 of 8 = 2 → 6 train, 2 val

# Step 4: Show results
print("Full Dataset:")
print(list(zip(X, y)))
print("\nTraining Set:")
print(list(zip(X_train, y_train)))
print("\nValidation Set:")
print(list(zip(X_val, y_val)))
print("\nTesting Set:")
print(list(zip(X_test, y_test)))
print("Keshav Singh")

Output:
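Note: with only 10 samples a random split can leave the label classes unbalanced across the three sets. train_test_split accepts a stratify= argument that preserves class proportions; a minimal sketch of the same two-stage split with stratification:

from sklearn.model_selection import train_test_split

X = [f"sample{i}" for i in range(10)]
y = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]

# Stratify both splits so every set keeps the 50/50 label ratio
X_temp, X_test, y_temp, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)
X_train, X_val, y_train, y_val = train_test_split(
    X_temp, y_temp, test_size=0.25, random_state=42, stratify=y_temp)
print(y_train, y_val, y_test)  # each set keeps an even class balance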

Code:
# use of np.random.seed(42) - same output on every run
import numpy as np
np.random.seed(42)
print(np.random.randint(1, 100, 5))  # always the same five numbers for seed 42
np.random.seed(42)
print(np.random.randint(1, 100, 5))  # reseeding with 42 reproduces them exactly
print("Keshav Singh")
Output:
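Note: NumPy's newer Generator API keeps the seed local to one generator object instead of setting global state; a minimal alternative sketch of the same reproducibility demo:

import numpy as np

rng1 = np.random.default_rng(42)
rng2 = np.random.default_rng(42)
print(rng1.integers(1, 100, 5))  # same seed ...
print(rng2.integers(1, 100, 5))  # ... identical sequence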

Code:
# without seeding - different output every run
import numpy as np
print(np.random.randint(1, 100, 5))
print("Keshav Singh")

Output:

Code:
from sklearn.model_selection import train_test_split
import pandas as pd

# Step 1: Create small dataset with 10 samples
data = {
    'x1': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
    'x2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    'y': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
}
df = pd.DataFrame(data)

# Step 2: Split into train+val and test (80%-20%)
df_temp, df_test = train_test_split(df, test_size=0.2, random_state=42)

# Step 3: Split temp into train and validation (75%-25% of 8 = 6 train, 2 val)
df_train, df_val = train_test_split(df_temp, test_size=0.25, random_state=42)

# Step 4: Show results
print("🔹 Full Dataset:")
print(df, "\n")
print("✅ Training Set:")
print(df_train, "\n")
print("🔸 Validation Set:")
print(df_val, "\n")
print("🧪 Testing Set:")
print(df_test)
print("Keshav Singh")

Output:

Code:
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd

# Step 1: Create synthetic dataset with 10 samples and 23 features each
np.random.seed(42)
X = np.random.randint(0, 100, size=(10, 23))  # 10 samples, 23 features
y = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])  # labels

# Step 2: First split → Train+Val (80%) and Test (20%)
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Step 3: Second split → Train (75% of 8) and Validation (25% of 8)
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42)

# Step 4: Convert to DataFrames for clearer display
train_df = pd.DataFrame(X_train, columns=[f"F{i+1}" for i in range(23)])
train_df["Label"] = y_train
val_df = pd.DataFrame(X_val, columns=[f"F{i+1}" for i in range(23)])
val_df["Label"] = y_val
test_df = pd.DataFrame(X_test, columns=[f"F{i+1}" for i in range(23)])
test_df["Label"] = y_test

# Step 5: Display the sets
print("🔹 Training Set (Shape:", X_train.shape, ")")
print(train_df, "\n")
print("🔹 Validation Set (Shape:", X_val.shape, ")")
print(val_df, "\n")
print("🔹 Test Set (Shape:", X_test.shape, ")")
print(test_df)
print("Keshav Singh")

Output:

Code:
#load dataset from scikit learn
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import pandas as pd
# Load iris dataset
iris = load_iris(as_frame=True) # gives pandas dataframe
df = iris.frame
# Features and target
X = df.drop(columns='target')
y = df['target']
# Split into train+val and test (80% train+val, 20% test)
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Split train+val into 75% train, 25% val (i.e., 60% train, 20% val overall)
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42)
# Print shapes
print("Train shape:", X_train.shape)
print("Validation shape:", X_val.shape)
print("Test shape:", X_test.shape)
print("Keshav Singh")

Output:

Code:
#Load a CSV from the Internet (via URL)
import pandas as pd
from sklearn.model_selection import train_test_split
# Load dataset from UCI link or GitHub raw CSV
url = "https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv"
df = pd.read_csv(url)
# Features and label
X = df.drop(columns='species')
y = df['species']
# Split like before
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42)
print("Train shape:", X_train.shape)
print("Validation shape:", X_val.shape)
print("Test shape:", X_test.shape)
print("Keshav Singh ")

Output:
Code:
#To upload from your own system
from google.colab import files
uploaded = files.upload()

Output:
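Note: files.upload() saves each chosen file into the Colab working directory and also returns a dict mapping filename to bytes, which is why the next cell can open 'CLUB.xlsx' by name. Reading from the returned dict directly is an equivalent sketch (the filename is assumed):

import io
import pandas as pd
df = pd.read_excel(io.BytesIO(uploaded['CLUB.xlsx']))
print(df.head())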

Code:
import pandas as pd
df = pd.read_excel('CLUB.xlsx')
print(df.head())
print("Keshav Singh")

Output:

Code:
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
import pandas as pd
# Load Iris dataset
iris = load_iris(as_frame=True)
df = iris.frame
# Features and target
X = df.drop(columns='target')
y = df['target']
# Split into train+val and test (80% train+val, 20% test)
X_temp, X_test, y_temp, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Split train+val into 75% train, 25% val (i.e., 60% train, 20% val)
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=0.25, random_state=42)

# 1. Build and train model (Logistic Regression)
model = LogisticRegression(max_iter=200)
model.fit(X_train, y_train)

# 2. Evaluate on validation set
val_preds = model.predict(X_val)
print("Validation Accuracy:", accuracy_score(y_val, val_preds))

# 3. Final evaluation on test set
test_preds = model.predict(X_test)
print("\nTest Accuracy:", accuracy_score(y_test, test_preds))

# 4. Classification report
print("\nClassification Report:\n", classification_report(y_test, test_preds))
print("Keshav Singh")

Output:

Code:
# change the model to a decision tree (or another classifier)
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report

# Use the same training, validation, and test splits from before
# 1. Create and train a model
# model = DecisionTreeClassifier(random_state=42)
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, y_train)

# 2. Predict on validation data
val_preds = model.predict(X_val)
print("Validation Accuracy:", accuracy_score(y_val, val_preds))

# 3. Predict on test data
test_preds = model.predict(X_test)
print("Test Accuracy:", accuracy_score(y_test, test_preds))

# 4. Show classification report
print("\nClassification Report:\n", classification_report(y_test, test_preds))

# Other models to test (imports needed - see the sketch after the output below):
# model = RandomForestClassifier(n_estimators=100, random_state=42)
# model = SVC(kernel='linear')  # support vector machine
print("Keshav Singh")

Output:
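Note: the alternative models suggested in the comments above (RandomForestClassifier, SVC) need their own imports before they will run. A minimal comparison sketch over the same splits, assuming X_train, X_val, y_train, y_val from the earlier cells:

from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

models = {
    "Decision Tree": DecisionTreeClassifier(random_state=42),
    "KNN (k=3)": KNeighborsClassifier(n_neighbors=3),
    "Random Forest": RandomForestClassifier(n_estimators=100, random_state=42),
    "Linear SVM": SVC(kernel='linear'),
}
for name, clf in models.items():
    clf.fit(X_train, y_train)
    print(name, "- Validation Accuracy:", accuracy_score(y_val, clf.predict(X_val)))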

Code:
# see the actual tree
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier

# Create and train a Decision Tree model for plotting
model_tree = DecisionTreeClassifier(random_state=42)
model_tree.fit(X_train, y_train)

plt.figure(figsize=(12, 6))
plot_tree(model_tree, feature_names=X.columns, class_names=iris.target_names, filled=True)
plt.title("Decision Tree for Iris Dataset")
plt.show()
print("Keshav Singh")

Output:

Code:
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, classification_report

# Try different learning rates, activation functions, and hidden layer sizes.
# hidden_layer_sizes=(10,) means one hidden layer with 10 neurons;
# (5, 5) means two hidden layers with 5 neurons each;
# five hidden layers of 4 neurons each would be (4, 4, 4, 4, 4).
configs = [
    {"activation": "relu", "learning_rate_init": 0.01, "hidden_layer_sizes": (10,)},
    {"activation": "tanh", "learning_rate_init": 0.001, "hidden_layer_sizes": (5, 5)},
    {"activation": "logistic", "learning_rate_init": 0.005, "hidden_layer_sizes": (20,)},
]

for cfg in configs:
    model = MLPClassifier(
        activation=cfg["activation"],
        learning_rate_init=cfg["learning_rate_init"],
        hidden_layer_sizes=cfg["hidden_layer_sizes"],
        max_iter=1000,
        random_state=42
    )
    model.fit(X_train, y_train)
    val_preds = model.predict(X_val)
    test_preds = model.predict(X_test)
    print(f"\nMLP | Activation: {cfg['activation']}, LR: {cfg['learning_rate_init']}, "
          f"Layers: {cfg['hidden_layer_sizes']}")
    print("Validation Accuracy:", accuracy_score(y_val, val_preds))
    print("Test Accuracy:", accuracy_score(y_test, test_preds))
print("Keshav Singh")

Output:
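Note: the same hyperparameter sweep can be automated with scikit-learn's GridSearchCV instead of a hand-written config list; a minimal sketch (the grid values here are illustrative):

from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier

param_grid = {
    "activation": ["relu", "tanh"],
    "learning_rate_init": [0.01, 0.001],
    "hidden_layer_sizes": [(10,), (5, 5)],
}
search = GridSearchCV(MLPClassifier(max_iter=1000, random_state=42),
                      param_grid, cv=3)
search.fit(X_train, y_train)
print("Best params:", search.best_params_)
print("Best CV accuracy:", search.best_score_)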

Code:
# using Keras
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score

# Create sample data
X, y = make_classification(n_samples=300, n_features=10, n_informative=5, random_state=42)
X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size=0.25, random_state=42)

# Convert targets to categorical
y_train_cat = to_categorical(y_train)
y_val_cat = to_categorical(y_val)
y_test_cat = to_categorical(y_test)

# Configuration list
configs = [
    {"activation": "relu", "learning_rate": 0.01, "hidden_layers": [10]},
    {"activation": "tanh", "learning_rate": 0.001, "hidden_layers": [5, 5]},
    {"activation": "sigmoid", "learning_rate": 0.005, "hidden_layers": [20]},
]

# Train multiple models
for cfg in configs:
    model = Sequential()
    model.add(Dense(cfg["hidden_layers"][0], input_dim=10, activation=cfg["activation"]))
    for layer_size in cfg["hidden_layers"][1:]:
        model.add(Dense(layer_size, activation=cfg["activation"]))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=cfg["learning_rate"]),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train_cat, epochs=50, verbose=0, validation_data=(X_val, y_val_cat))
    val_preds = np.argmax(model.predict(X_val), axis=1)
    test_preds = np.argmax(model.predict(X_test), axis=1)
    print(f"\nKeras | Activation: {cfg['activation']}, LR: {cfg['learning_rate']}, "
          f"Layers: {cfg['hidden_layers']}")
    print("Validation Accuracy:", accuracy_score(y_val, val_preds))
    print("Test Accuracy:", accuracy_score(y_test, test_preds))
print("Keshav Singh")

Output:

Program 4
AIM: Design a CNN architecture to implement the image classification task over
an image dataset. Perform the Hyper-parameter tuning and record the results.
Code:
!pip install -q keras_tuner
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from keras_tuner.tuners import RandomSearch
import matplotlib.pyplot as plt
import numpy as np

# Load dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize pixel values
x_train, x_test = x_train / 255.0, x_test / 255.0

# Convert labels to categorical
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Check shape
print("Training data shape:", x_train.shape)
print("Keshav Singh")

Output:

Code:
def build_model(hp):
    model = keras.Sequential()

    # Convolutional layers
    model.add(layers.Conv2D(
        filters=hp.Int('conv_1_filter', min_value=32, max_value=128, step=16),
        kernel_size=hp.Choice('conv_1_kernel', values=[3, 5]),
        activation='relu',
        input_shape=(32, 32, 3)
    ))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(
        filters=hp.Int('conv_2_filter', min_value=64, max_value=256, step=16),
        kernel_size=hp.Choice('conv_2_kernel', values=[3, 5]),
        activation='relu'
    ))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Flatten and Dense layers
    model.add(layers.Flatten())
    model.add(layers.Dense(
        units=hp.Int('dense_units', min_value=64, max_value=512, step=32),
        activation='relu'
    ))
    model.add(layers.Dense(10, activation='softmax'))

    # Compile model
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
        ),
        loss='categorical_crossentropy',
        metrics=['accuracy']
    )
    return model

from keras_tuner.tuners import RandomSearch

tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=5,
    executions_per_trial=1,
    directory='cnn_tuning',
    project_name='image_classification'
)
tuner.search_space_summary()

# Begin search
tuner.search(x_train, y_train,
             epochs=10,
             validation_split=0.2,
             callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)])
print("Keshav Singh")

Output:

Code:
best_model = tuner.get_best_models(num_models=1)[0]
best_hyperparams = tuner.get_best_hyperparameters(num_trials=1)[0]
print("Best Hyperparameters:")
print(best_hyperparams.values)

# Evaluate on the test set
test_loss, test_acc = best_model.evaluate(x_test, y_test)
print("\nTest Accuracy:", test_acc)
print("Keshav Singh")
Output:

Code:
history = best_model.fit(x_train, y_train, epochs=10, validation_split=0.2)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.title('Accuracy over Epochs')
plt.show()
print("Keshav Singh")

Output:
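Note: besides RandomSearch, KerasTuner also provides a Hyperband tuner that stops weak trials early, which usually costs fewer total training epochs for the same search space. A minimal sketch reusing build_model from above (the max_epochs and factor values are illustrative):

import keras_tuner as kt

tuner_hb = kt.Hyperband(
    build_model,
    objective='val_accuracy',
    max_epochs=10,
    factor=3,
    directory='cnn_tuning',
    project_name='image_classification_hb'
)
tuner_hb.search(x_train, y_train, validation_split=0.2)
print(tuner_hb.get_best_hyperparameters(num_trials=1)[0].values)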

Program 5
AIM: Load the MNIST dataset and normalize the pixel values between 0 and 1.
Visualize one example digit image from the dataset with its corresponding label.
Manually apply a Sobel filter to detect vertical edges in an MNIST digit image using convolve2d, and display both the original and edge-detected images.
Build and train a Convolutional Neural Network (CNN) with the following architecture:
Input layer for images of shape (28, 28, 1)
1st Convolutional layer with 8 filters of size 3×3 and ReLU activation
MaxPooling layer of size 2×2
2nd Convolutional layer with 16 filters of size 3×3 and ReLU activation
MaxPooling layer of size 2×2
Flatten layer
Dense layer with 32 neurons and ReLU activation
Output Dense layer with 10 neurons (Softmax activation)
Compile and train the CNN using the Adam optimizer for 3 epochs, and plot the training and validation accuracy curves.
Visualize the learned filters from the first convolutional layer.
Visualize the feature maps of the first two convolutional layers for any one image from the test set.

Code:
# STEP 1: Import Libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models, Input
from tensorflow.keras.datasets import mnist
from scipy.signal import convolve2d
from tensorflow.keras import Model

# STEP 2: Load Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

# Reshape for CNN (batch, height, width, channels)
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)

# STEP 3: Show Example Image
plt.imshow(x_train[0].reshape(28, 28), cmap='gray')
plt.title(f"Label: {y_train[0]}")
plt.show()

# STEP 4: Manual Convolution (Sobel Edge Detection Example)
image = x_train[0].reshape(28, 28)
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])  # vertical-edge kernel
edge_image = convolve2d(image, sobel_x, mode='same', boundary='fill', fillvalue=0)
plt.subplot(1, 2, 1)
plt.imshow(image, cmap='gray')
plt.title("Original")
plt.subplot(1, 2, 2)
plt.imshow(edge_image, cmap='gray')
plt.title("Edges (Sobel X)")
plt.show()

# STEP 5: Build a Simple CNN with Explicit Input Layer
inputs = Input(shape=(28, 28, 1))
x = layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(16, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Flatten()(x)
x = layers.Dense(32, activation='relu')(x)
outputs = layers.Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# STEP 6: Train CNN
history = model.fit(x_train, y_train, epochs=3, validation_data=(x_test, y_test))

# STEP 7: Plot Accuracy
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Test Accuracy')
plt.legend()
plt.show()

# STEP 8: Visualize Filters of First Layer
filters, biases = model.layers[1].get_weights()  # first Conv2D is model.layers[1]
filters = (filters - filters.min()) / (filters.max() - filters.min())  # normalize to [0, 1]
fig, axs = plt.subplots(1, 8, figsize=(20, 5))
for i in range(8):
    axs[i].imshow(filters[:, :, 0, i], cmap='gray')
    axs[i].set_title(f"Filter {i+1}")
    axs[i].axis('off')
plt.show()

# STEP 9: Visualize Feature Maps for One Image
layer_outputs = [layer.output for layer in model.layers if isinstance(layer, layers.Conv2D)]
activation_model = Model(inputs=model.input, outputs=layer_outputs)
img = x_test[0].reshape(1, 28, 28, 1)
activations = activation_model.predict(img)
for layer_num, feature_map in enumerate(activations):
    num_filters = feature_map.shape[-1]
    fig, axs = plt.subplots(1, num_filters, figsize=(20, 5))
    for i in range(num_filters):
        axs[i].imshow(feature_map[0, :, :, i], cmap='gray')
        axs[i].axis('off')
    plt.suptitle(f"Feature Maps - Conv Layer {layer_num+1}")
    plt.show()
print("Keshav Singh")
Output:
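Note: the fit call above already passes the test set as validation_data, so val_accuracy tracks test performance per epoch; a one-line final summary can also be printed with model.evaluate on the trained model:

test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("Test accuracy:", test_acc)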

Program 6
AIM: CNN on Fashion MNIST + Horizontal Edge Detection
Load the Fashion MNIST dataset from Keras.
Normalize pixel values and reshape the data for CNN input.
Visualize one sample image and its label.
Apply a Sobel filter for horizontal edge detection manually using convolve2d and display original vs. edge-detected images.
Build a CNN with:
Conv2D (16 filters, 3×3, ReLU) → MaxPooling (2×2)
Conv2D (32 filters, 3×3, ReLU) → MaxPooling (2×2)
Flatten → Dense (64, ReLU) → Dense (10, Softmax)
Train for 5 epochs and plot training vs validation accuracy.
Visualize filters from the first Conv layer and feature maps for a test image.

Code:
import tensorflow as tf

(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images = train_images.reshape((train_images.shape[0], 28, 28, 1))
test_images = test_images.reshape((test_images.shape[0], 28, 28, 1))
print("Train images shape:", train_images.shape)
print("Test images shape:", test_images.shape)
print("Keshav Singh")

Output:

Code:
import matplotlib.pyplot as plt
plt.imshow(train_images[0].reshape(28, 28), cmap='gray')
plt.colorbar()
plt.grid(False)
plt.show()
print("Label:", train_labels[0])
print("Abhinav Sushil Varshney")
Output:

Code:
from scipy.signal import convolve2d
import numpy as np
import matplotlib.pyplot as plt

sobel_filter = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])  # horizontal-edge kernel
sample_image = train_images[0].reshape(28, 28)
edge_detected_image = convolve2d(sample_image, sobel_filter, mode='same', boundary='symm')

fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(sample_image, cmap='gray')
axes[0].set_title("Original Image")
axes[0].axis('off')
axes[1].imshow(edge_detected_image, cmap='gray')
axes[1].set_title("Horizontal Edge Detection")
axes[1].axis('off')
plt.tight_layout()
plt.show()
print("Keshav Singh")
Output:

Code:
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Sequential

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=5)
print("Keshav Singh")
Output:

Code:
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'], label='accuracy')
# plt.plot(history.history['val_accuracy'], label = 'val_accuracy') # Removed this line
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training Accuracy over Epochs') # Updated title
plt.legend(loc='lower right')
plt.show()
print("Abhinav Sushil Varshney")

Output:
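Note: the val_accuracy line is commented out above because model.fit was called without any validation data. To produce the training-vs-validation plot the AIM asks for, passing validation_split to fit is enough; a minimal sketch reusing the model from the earlier cell:

history = model.fit(train_images, train_labels, epochs=5, validation_split=0.1)
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()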
Code:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

first_conv_layer_weights = model.layers[0].get_weights()[0]
num_filters = first_conv_layer_weights.shape[-1]
fig, axes = plt.subplots(1, num_filters, figsize=(num_filters * 2, 2))
for i in range(num_filters):
    filter_weights = first_conv_layer_weights[:, :, :, i].squeeze()
    axes[i].imshow(filter_weights, cmap='gray')
    axes[i].set_title(f'Filter {i+1}')
    axes[i].axis('off')
plt.tight_layout()
plt.show()
print("Keshav Singh")

Output:

Code:
test_image = test_images[0]
test_image_reshaped = test_image.reshape(1, 28, 28, 1)
layer_name = model.layers[0].name
feature_map_model = tf.keras.Model(inputs=model.inputs, outputs=model.get_layer(layer_name).output)
feature_maps = feature_map_model.predict(test_image_reshaped)
feature_maps = feature_maps.squeeze()
num_feature_maps = feature_maps.shape[-1]
fig, axes = plt.subplots(4, num_feature_maps // 4, figsize=(num_feature_maps // 4 * 2, 8))
axes = axes.ravel()
for i in range(num_feature_maps):
    axes[i].imshow(feature_maps[:, :, i], cmap='viridis')
    axes[i].set_title(f'Feature Map {i+1}')
    axes[i].axis('off')
plt.tight_layout()
plt.show()
print("Keshav Singh")
Output:
