Programming for Artificial Intelligence
Submitted by
2022-CS-438
Under the Guidance of
Sir Abdul Jaleel
Department of Computer Science
In Partial Fulfillment of the Requirements
for the Award of the Degree of
Bachelor of Technology
DEPARTMENT OF COMPUTER SCIENCE
UNIVERSITY OF ENGINEERING AND TECHNOLOGY, LAHORE
(Jan-Apr 2025)
Task 1: CNN Classification of Parkinson's Spiral and Wave Drawings
# Importing libraries for image processing and plotting
from tensorflow.keras.preprocessing.image import load_img, img_to_array  # load_img loads an image file; img_to_array converts it into a numerical array
import os  # Functions for interacting with the operating system (e.g., listing directories)
import matplotlib.pyplot as plt  # Library for creating visualizations and plots

# Set the style for all plots to a dark background for better contrast
plt.style.use('dark_background')

# ----------------------------
# Visualizing Healthy Spiral Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a new figure, 12 x 12 inches
for i in range(1, 10):  # Loop over image indices 1 to 9 (index 0 is skipped; adjust as needed)
    plt.subplot(3, 3, i)  # Select the i-th cell of a 3x3 grid
    # Build the path for the i-th healthy spiral image from the directory listing
    img = load_img(
        "../input/parkinsons-drawings/spiral/training/healthy/" +
        os.listdir("../input/parkinsons-drawings/spiral/training/healthy")[i]
    )
    plt.imshow(img)  # Display the loaded image in the current subplot
plt.show()  # Render and display the figure with all 9 images
# ----------------------------
# Visualizing Parkinson Spiral Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a new figure for spiral images of Parkinson patients
for i in range(1, 10):  # Loop over the first 9 images
    plt.subplot(3, 3, i)  # Select the i-th cell of the 3x3 grid
    # Build the path for the i-th Parkinson spiral image from the appropriate folder
    img = load_img(
        "../input/parkinsons-drawings/spiral/training/parkinson/" +
        os.listdir("../input/parkinsons-drawings/spiral/training/parkinson")[i]
    )
    plt.imshow(img)  # Plot the image in the subplot
plt.show()  # Show the figure containing the Parkinson spiral images
# ----------------------------
# Visualizing Healthy Wave Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a figure for healthy wave images
for i in range(1, 10):  # Loop through indices 1 to 9 for healthy wave images
    plt.subplot(3, 3, i)  # Set up the 3x3 subplot grid
    # Build the file path for each healthy wave image from the directory listing
    img = load_img(
        "../input/parkinsons-drawings/wave/training/healthy/" +
        os.listdir("../input/parkinsons-drawings/wave/training/healthy")[i]
    )
    plt.imshow(img)  # Display the image
plt.show()  # Display the overall figure for healthy wave images
# ----------------------------
# Visualizing Parkinson Wave Images
# ----------------------------
plt.figure(figsize=(12, 12))  # Create a figure for Parkinson wave images
for i in range(1, 10):  # Loop over the first 9 images from the Parkinson wave folder
    plt.subplot(3, 3, i)  # Select the subplot in the 3x3 grid
    # Build the full file path for the image from the Parkinson wave training directory
    img = load_img(
        "../input/parkinsons-drawings/wave/training/parkinson/" +
        os.listdir("../input/parkinsons-drawings/wave/training/parkinson")[i]
    )
    plt.imshow(img)  # Display the image in the subplot
plt.show()  # Show all the subplots in one figure
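The four loops above repeat the same pattern with only the folder changing. As a minimal consolidation sketch (the helper name show_grid is mine, not from the original code), the grid plotting could be factored into one function:

def show_grid(folder, n_rows=3, n_cols=3):
    # Hypothetical helper: plot a grid of images from `folder`
    plt.figure(figsize=(12, 12))
    files = os.listdir(folder)
    for i in range(1, n_rows * n_cols + 1):  # Mirrors the 1-based indexing of the loops above
        plt.subplot(n_rows, n_cols, i)
        plt.imshow(load_img(folder + "/" + files[i]))
        plt.axis('off')
    plt.show()

# Equivalent to the first loop above:
# show_grid("../input/parkinsons-drawings/spiral/training/healthy")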
# ----------------------------
# Importing CNN Layers and Model Classes from Keras
# ----------------------------
from tensorflow.keras.models import Sequential  # Sequential model type for stacking layers linearly
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense  # Conv2D for convolutions, MaxPooling2D for pooling, Flatten to turn feature maps into a vector, Dense for fully connected layers
# ----------------------------
# Building the Classifier Model
# ----------------------------
classifier = Sequential()  # Initialize a sequential model for the CNN
# First convolutional layer: 32 filters, 3x3 kernel, ReLU activation; the input shape is specified here
classifier.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), activation='relu'))
# Max pooling layer with a 2x2 pool size to reduce spatial dimensions
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Second convolutional layer: 32 filters, 3x3 kernel, ReLU activation
classifier.add(Conv2D(32, (3, 3), activation='relu'))
# Another max pooling layer with a 2x2 pool size
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten the 2D feature maps into a 1D feature vector
classifier.add(Flatten())
# Fully connected (dense) layer with 128 units and ReLU activation
classifier.add(Dense(units=128, activation='relu'))
# Output layer: 1 unit with sigmoid activation for binary classification
classifier.add(Dense(units=1, activation='sigmoid'))
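Before training, the layer stack and parameter counts can be checked with Keras' built-in summary() (a quick sanity check, not part of the original listing):

classifier.summary()  # Prints each layer's output shape and trainable parameter count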
# ----------------------------
# Preparing Image Data Generation for Training and Testing
# ----------------------------
from tensorflow.keras.preprocessing.image import ImageDataGenerator  # Real-time data augmentation

# ImageDataGenerator for training: rescales pixel values and applies augmentation transformations
train_datagen = ImageDataGenerator(
    rescale=1./255,       # Scale image pixel values to [0, 1]
    shear_range=0.2,      # Randomly shear images
    zoom_range=0.2,       # Randomly zoom images
    horizontal_flip=True  # Randomly flip images horizontally
)

# ImageDataGenerator for testing: only rescales the images
test_datagen = ImageDataGenerator(rescale=1./255)

# Generator for spiral training images
spiral_train_generator = train_datagen.flow_from_directory(
    '../input/parkinsons-drawings/spiral/training',  # Directory for spiral training images
    target_size=(128, 128),  # Resize all images to 128x128 pixels
    batch_size=32,           # Batches of 32 images
    class_mode='binary'      # Binary classification (healthy vs. parkinson)
)

# Generator for spiral testing images
spiral_test_generator = test_datagen.flow_from_directory(
    '../input/parkinsons-drawings/spiral/testing',  # Directory for spiral testing images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)

# Generator for wave training images
wave_train_generator = train_datagen.flow_from_directory(
    '../input/parkinsons-drawings/wave/training',  # Directory for wave training images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)

# Generator for wave testing images
wave_test_generator = test_datagen.flow_from_directory(
    '../input/parkinsons-drawings/wave/testing',  # Directory for wave testing images
    target_size=(128, 128),
    batch_size=32,
    class_mode='binary'
)
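flow_from_directory assigns class indices from the subfolder names in alphabetical order; printing them (not in the original script) confirms which label is 0 and which is 1:

print(spiral_train_generator.class_indices)  # Expected: {'healthy': 0, 'parkinson': 1}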
# ----------------------------
# Fitting the Model using the Training Data
# ----------------------------
from tensorflow.keras.optimizers import Adam  # Adam optimizer
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau  # Callbacks for early stopping and learning-rate reduction

# EarlyStopping callback: halt training when validation loss stops improving
early_stopping = EarlyStopping(
    monitor='val_loss',        # Monitor validation loss
    min_delta=0,               # Minimum change to qualify as an improvement
    patience=3,                # Epochs with no improvement before training stops
    verbose=1,                 # Verbose output for the callback
    restore_best_weights=True  # Restore weights from the epoch with the best monitored value
)

# ReduceLROnPlateau callback: reduce the learning rate when the monitored metric plateaus
reduce_learningrate = ReduceLROnPlateau(
    monitor='val_loss',  # Monitor validation loss
    factor=0.2,          # Reduction factor (new_lr = lr * factor)
    patience=3,          # Epochs with no improvement before reducing the learning rate
    verbose=1,           # Verbose output for the callback
    min_delta=0.0001     # Threshold for measuring a new optimum, to focus on significant changes
)

# Group callbacks into a list to pass them together
callbacks_list = [early_stopping, reduce_learningrate]

# Define the number of training epochs
epochs = 48

# Compile the classifier with binary cross-entropy loss and the Adam optimizer
classifier.compile(
    loss='binary_crossentropy',           # Loss function for binary classification
    optimizer=Adam(learning_rate=0.001),  # Adam optimizer with an explicit learning rate
    metrics=['accuracy']                  # Metric to monitor during training
)

# Fit the model on the spiral training images, validating with the spiral testing images
history = classifier.fit(
    spiral_train_generator,  # Training data generator
    steps_per_epoch=spiral_train_generator.n // spiral_train_generator.batch_size,  # Steps per epoch
    epochs=epochs,  # Total number of epochs
    validation_data=spiral_test_generator,  # Validation data generator
    validation_steps=spiral_test_generator.n // spiral_test_generator.batch_size,  # Steps for validation
    callbacks=callbacks_list  # Early stopping and learning-rate reduction
)
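The wave generators defined earlier are never used in the original listing. A minimal sketch of training the same architecture on the wave drawings (clone_model re-initializes the weights; this step is my addition, not part of the original code):

from tensorflow.keras.models import clone_model

wave_classifier = clone_model(classifier)  # Same architecture, freshly initialized weights
wave_classifier.compile(loss='binary_crossentropy',
                        optimizer=Adam(learning_rate=0.001),
                        metrics=['accuracy'])
wave_history = wave_classifier.fit(
    wave_train_generator,
    steps_per_epoch=wave_train_generator.n // wave_train_generator.batch_size,
    epochs=epochs,
    validation_data=wave_test_generator,
    validation_steps=wave_test_generator.n // wave_test_generator.batch_size,
    callbacks=callbacks_list
)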
# ----------------------------
# Plotting the Training Accuracy and Loss Curves
# ----------------------------
plt.style.use('dark_background')  # Re-apply the plot style for consistency
plt.figure(figsize=(12, 6))  # Create a figure for plotting training history

# Training accuracy curve (left subplot)
plt.subplot(1, 2, 1)
plt.ylabel('Accuracy', fontsize=16)  # Label the y-axis
plt.plot(history.history['accuracy'], label='Training Accuracy', color='green')  # Accuracy in green
plt.legend(loc='lower right')  # Place the legend in the lower-right corner

# Training loss curve (right subplot)
plt.subplot(1, 2, 2)
plt.ylabel('Loss', fontsize=16)  # Label the y-axis
plt.plot(history.history['loss'], label='Training Loss', color='red')  # Loss in red
plt.legend(loc='lower right')  # Place the legend

plt.show()  # Show the complete figure with both subplots
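Because validation_data was passed to fit, history.history also contains 'val_accuracy' and 'val_loss'. Overlaying them on the training curves (an addition to the original script) makes overfitting easier to spot:

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['accuracy'], label='Training Accuracy', color='green')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy', color='cyan')
plt.legend(loc='lower right')
plt.subplot(1, 2, 2)
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], label='Training Loss', color='red')
plt.plot(history.history['val_loss'], label='Validation Loss', color='orange')
plt.legend(loc='upper right')
plt.show()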
Task 2 (GitHub):
Parkinson’s Disease Detection Using CNN - Fully Commented Code
# Importing necessary libraries
import numpy as np  # For numerical operations
import matplotlib.pyplot as plt  # For plotting graphs and images
import os  # For interacting with the operating system and file directories
import cv2  # OpenCV for image processing
from tensorflow.keras.utils import to_categorical  # To convert labels to one-hot encoding
from tensorflow.keras.models import Sequential  # For building the CNN model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout  # CNN layers

# Data directory containing the dataset
data_path = '/content/drive/MyDrive/parkinsons_dataset'

# Lists to store images and their labels
images = []
labels = []

# Loop through each folder in the dataset directory
for folder in os.listdir(data_path):
    folder_path = os.path.join(data_path, folder)  # Full path to the current folder
    label = int(folder.split('_')[0])  # Extract the label (0 or 1) from a folder name with a leading digit
    # Loop through each image file in the current folder
    for file in os.listdir(folder_path):
        file_path = os.path.join(folder_path, file)  # Full path to the image file
        image = cv2.imread(file_path)  # Read the image using OpenCV
        image = cv2.resize(image, (100, 100))  # Resize the image to 100x100 pixels
        images.append(image)  # Add the image to the list
        labels.append(label)  # Add the corresponding label

# Convert the lists to NumPy arrays for processing
images = np.array(images)
labels = np.array(labels)

# Normalize image pixel values to the range [0, 1]
images = images / 255.0

# Convert labels to categorical (one-hot encoded) form for classification
labels = to_categorical(labels)
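For two classes, to_categorical maps each integer label to a one-hot row vector; a quick illustration (not part of the original code):

print(to_categorical([0, 1, 1]))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]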
# Split the dataset into training (80%) and testing (20%) sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
# Build the CNN model with the Keras Sequential API
model = Sequential()
# First convolutional layer: 32 filters with ReLU activation
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
# Max pooling layer to reduce spatial dimensions
model.add(MaxPooling2D(pool_size=(2, 2)))
# Second convolutional layer: 64 filters
model.add(Conv2D(64, (3, 3), activation='relu'))
# Another max pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten the 2D feature maps into 1D for the fully connected layers
model.add(Flatten())
# Fully connected (dense) layer with 128 neurons
model.add(Dense(128, activation='relu'))
# Dropout layer to reduce overfitting
model.add(Dropout(0.5))
# Output layer: 2 neurons (one per class) with softmax activation
model.add(Dense(2, activation='softmax'))

# Compile the model with loss function, optimizer, and evaluation metric
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the CNN model on the training data
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

# Evaluate model performance on the test set
loss, accuracy = model.evaluate(x_test, y_test)
print("Test Accuracy: {:.2f}%".format(accuracy * 100))
Task 3: MRI Image Preprocessing Pipeline for Deep Learning
# MRI Image Preprocessing and Augmentation Pipeline
# Author: Huzaiifa
# Description: This script handles DICOM-to-PNG conversion, normalization, resizing, and augmentation for MRI image datasets.

import os
import pydicom
import numpy as np
from PIL import Image
import random

# Convert DICOM files to PNG format
def convert_dicom_to_png(dicom_dir, png_dir):
    if not os.path.exists(png_dir):
        os.makedirs(png_dir)
    for filename in os.listdir(dicom_dir):
        if filename.lower().endswith(".dcm"):
            dicom_path = os.path.join(dicom_dir, filename)
            dicom_image = pydicom.dcmread(dicom_path)
            image_array = dicom_image.pixel_array.astype(float)
            # Min-max normalize pixel values to 0-255 and convert to uint8
            normalized_image = ((image_array - np.min(image_array)) /
                                (np.max(image_array) - np.min(image_array))) * 255.0
            image_uint8 = normalized_image.astype(np.uint8)
            # Convert the array to a PIL image and save it as PNG
            image = Image.fromarray(image_uint8)
            output_path = os.path.join(png_dir, os.path.splitext(filename)[0] + ".png")
            image.save(output_path)
# Convert images to grayscale and scale pixel values through [0, 1]
# (note: the array is rescaled back to uint8 before saving, so the stored intensities
# are unchanged; the lasting effect of this pass is the grayscale "L" conversion)
def normalize_images(image_dir):
    for filename in os.listdir(image_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            path = os.path.join(image_dir, filename)
            image = Image.open(path).convert("L")
            array = np.array(image).astype(np.float32) / 255.0  # In-memory [0, 1] array
            image = Image.fromarray((array * 255).astype(np.uint8))  # Back to uint8 for saving
            image.save(path)
# Resize images in place to a fixed dimension
def resize_images(image_dir, output_size=(224, 224)):
    for filename in os.listdir(image_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            path = os.path.join(image_dir, filename)
            image = Image.open(path).convert("L")
            image = image.resize(output_size)
            image.save(path)
# Apply random data augmentation techniques to every image in a directory
def augment_images(input_dir, output_dir, augmentations, num_augmented=3):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for filename in os.listdir(input_dir):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            img_path = os.path.join(input_dir, filename)
            image = Image.open(img_path).convert("L")
            for i in range(num_augmented):
                aug_img = image.copy()
                # Random rotation
                if random.random() < augmentations.get("rotation_probability", 0.5):
                    angle = random.uniform(
                        -augmentations.get("rotation_angle", 30),
                        augmentations.get("rotation_angle", 30)
                    )
                    aug_img = aug_img.rotate(angle)
                # Random horizontal flip
                if random.random() < augmentations.get("flip_probability", 0.5):
                    aug_img = aug_img.transpose(Image.FLIP_LEFT_RIGHT)
                # Random vertical flip
                if random.random() < augmentations.get("flip_probability", 0.5):
                    aug_img = aug_img.transpose(Image.FLIP_TOP_BOTTOM)
                # Optional zoom: crop a central region 1/zoom_factor the size, then scale it back up
                if random.random() < augmentations.get("zoom_probability", 0.3):
                    zoom_factor = augmentations.get("zoom_factor", 1.2)
                    w, h = aug_img.size
                    zoom_w, zoom_h = int(w / zoom_factor), int(h / zoom_factor)
                    cropped = aug_img.crop(((w - zoom_w) // 2, (h - zoom_h) // 2,
                                            (w + zoom_w) // 2, (h + zoom_h) // 2))
                    aug_img = cropped.resize((w, h), Image.LANCZOS)
                # Save the augmented image
                aug_img.save(os.path.join(output_dir, f"aug_{i}_{filename}"))
# Example usage:
# Step 1: Convert DICOM images to PNG format
# convert_dicom_to_png("DICOM_PD", "PNG_PD")
# Step 2: Normalize PNG images to range [0, 1]
# normalize_images("PNG_PD")
# Step 3: Resize images to 224x224 resolution
# resize_images("PNG_PD")
# Step 4: Augment the dataset
# augment_images(
#     input_dir="PNG_PD",
#     output_dir="Augmented_PD",
#     augmentations={
#         "rotation_probability": 0.7,
#         "rotation_angle": 20,
#         "flip_probability": 0.5,
#         "zoom_probability": 0.3,
#         "zoom_factor": 1.1
#     },
#     num_augmented=5
# )
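To make the commented example directly executable, a minimal driver can run the four steps in order (an addition, assuming the directory names above; "DICOM_PD" must already contain the DICOM files):

if __name__ == "__main__":
    convert_dicom_to_png("DICOM_PD", "PNG_PD")       # Step 1: DICOM -> PNG
    normalize_images("PNG_PD")                       # Step 2: grayscale/scaling pass
    resize_images("PNG_PD", output_size=(224, 224))  # Step 3: fixed 224x224 size
    augment_images(                                  # Step 4: augmentation
        input_dir="PNG_PD",
        output_dir="Augmented_PD",
        augmentations={
            "rotation_probability": 0.7,
            "rotation_angle": 20,
            "flip_probability": 0.5,
            "zoom_probability": 0.3,
            "zoom_factor": 1.1,
        },
        num_augmented=5,
    )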
Output Images: (output figures are not reproduced in this text version)