Program 1:
import numpy as np

# Activation Functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    # Assumes x is already the sigmoid output
    return x * (1 - x)

# Initialize Parameters
input_size = 2
hidden_size = 4
output_size = 1

# Set seed for reproducibility
np.random.seed(42)

# Initialize weights and biases
weights_input_hidden = np.random.rand(input_size, hidden_size)
weights_hidden_output = np.random.rand(hidden_size, output_size)
bias_hidden = np.random.rand(hidden_size)
bias_output = np.random.rand(output_size)

# Training Data (XOR problem)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Hyperparameters
learning_rate = 0.5
epochs = 10000

# Training Process
for epoch in range(epochs):
    # Forward Propagation
    hidden_input = np.dot(X, weights_input_hidden) + bias_hidden
    hidden_output = sigmoid(hidden_input)
    output_input = np.dot(hidden_output, weights_hidden_output) + bias_output
    output = sigmoid(output_input)

    # Compute the error
    error = y - output
    mean_squared_error = np.mean(np.square(error))

    # Backpropagation
    # Output layer error term
    output_error_term = error * sigmoid_derivative(output)
    # Hidden layer error term
    hidden_error_term = np.dot(output_error_term, weights_hidden_output.T) * sigmoid_derivative(hidden_output)

    # Update weights and biases
    weights_hidden_output += np.dot(hidden_output.T, output_error_term) * learning_rate
    weights_input_hidden += np.dot(X.T, hidden_error_term) * learning_rate
    bias_output += np.sum(output_error_term, axis=0) * learning_rate
    bias_hidden += np.sum(hidden_error_term, axis=0) * learning_rate

    # Optionally print the error every 1000 epochs
    if epoch % 1000 == 0:
        print(f"Epoch {epoch}, Mean Squared Error: {mean_squared_error}")

# Testing
hidden_input = np.dot(X, weights_input_hidden) + bias_hidden
hidden_output = sigmoid(hidden_input)
output_input = np.dot(hidden_output, weights_hidden_output) + bias_output
output = sigmoid(output_input)
print("\nFinal output after training:")
print(output)
Output:
Epoch 0, Mean Squared Error: 0.3855473441393805
Epoch 1000, Mean Squared Error: 0.016809009682083754
Epoch 2000, Mean Squared Error: 0.002462542360128216
Epoch 3000, Mean Squared Error: 0.0011878110656475828
Epoch 4000, Mean Squared Error: 0.0007618714201430668
Epoch 5000, Mean Squared Error: 0.0005545806141890376
Epoch 6000, Mean Squared Error: 0.0004334418242331799
Epoch 7000, Mean Squared Error: 0.000354506781005382
Epoch 8000, Mean Squared Error: 0.00029921542614632393
Epoch 9000, Mean Squared Error: 0.0002584379798477225
Final output after training:
[[0.016172 ]
[0.98334387]
[0.98758925]
[0.01468812]]
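As a quick check (not part of the original listing), the trained outputs can be thresholded at 0.5 to recover the XOR truth table; this sketch reuses the output and y arrays from Program 1 above.

# Convert the network's probabilities to 0/1 predictions and compare with the targets
predicted_classes = (output > 0.5).astype(int)
print(predicted_classes.ravel())        # expected: [0 1 1 0]
print((predicted_classes == y).all())   # True when all four XOR cases are classified correctly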
Program 2:
import numpy as np

# Activation Functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    # Assumes x is already the sigmoid output
    return x * (1 - x)

# Neural Network Parameters
input_size = 2
hidden_size = 4
output_size = 1

# Initialize weights and biases
np.random.seed(42)  # Seed for reproducibility
weights_input_hidden = np.random.rand(input_size, hidden_size)
weights_hidden_output = np.random.rand(hidden_size, output_size)
bias_hidden = np.random.rand(hidden_size)
bias_output = np.random.rand(output_size)

# Training Data (XOR problem)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Forward Propagation Function
def forward_propagation(X):
    # Calculate hidden layer activations
    hidden_input = np.dot(X, weights_input_hidden) + bias_hidden
    hidden_output = sigmoid(hidden_input)
    # Calculate output layer activations
    output_input = np.dot(hidden_output, weights_hidden_output) + bias_output
    output = sigmoid(output_input)
    return output

# Test the network
predictions = forward_propagation(X)
print("Final Output after forward propagation:")
print(predictions)
Output:
Final Output after forward propagation:
[[0.83958628]
[0.86884257]
[0.87938323]
[0.89751522]]
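Since the weights in Program 2 are random and never updated, the forward pass alone cannot solve XOR; a small check (assuming the predictions and y arrays defined above, not part of the original listing) makes that concrete by computing the error before any training.

# Mean squared error of the untrained network against the XOR targets
mse_untrained = np.mean(np.square(y - predictions))
print(f"MSE before training: {mse_untrained:.4f}")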
Program 3:
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

# Step 1: Load and Preprocess Data
# Load Iris dataset
iris = load_iris()
X = iris.data
y = iris.target

# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Step 2: Build and Train the Model
# Initialize the neural network classifier
model = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=500, random_state=42)

# Train the model
model.fit(X_train, y_train)

# Step 3: Evaluate Model Performance
# Make predictions
y_pred = model.predict(X_test)

# Evaluate performance
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")

# Classification report
print("\nClassification Report:")
print(classification_report(y_test, y_pred, target_names=iris.target_names))

# Confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)

# Plot confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
            xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix')
plt.show()

# Step 4: Apply the Model
# Example: Predict on new samples
new_samples = np.array([[5.1, 3.5, 1.4, 0.2], [6.7, 3.0, 5.2, 2.3]])
predictions = model.predict(new_samples)
predicted_labels = [iris.target_names[p] for p in predictions]
print("\nPredictions for new samples:")
for sample, label in zip(new_samples, predicted_labels):
    print(f"Sample: {sample}, Predicted Label: {label}")
Output:
Accuracy: 0.96
Classification Report:
              precision    recall  f1-score   support

      setosa       1.00      1.00      1.00        19
  versicolor       0.92      0.92      0.92        13
   virginica       0.92      0.92      0.92        13

    accuracy                           0.96        45
   macro avg       0.95      0.95      0.95        45
weighted avg       0.96      0.96      0.96        45
C:\Users\hp\anaconda3\lib\site-packages\sklearn\neural_network\_multilayer_perceptron.py:684:
ConvergenceWarning: Stochastic Optimizer: Maximum iterations (500) reached and the optimization
hasn't converged yet.
  warnings.warn(
Predictions for new samples:
Sample: [5.1 3.5 1.4 0.2], Predicted Label: setosa
Sample: [6.7 3. 5.2 2.3], Predicted Label: virginica
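The ConvergenceWarning above means the optimizer exhausted max_iter=500 before converging. A common remedy, shown here only as a hedged sketch and not as part of the prescribed program, is to standardize the features and allow more iterations:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Feature scaling typically lets MLPClassifier converge well within the iteration budget
model_scaled = make_pipeline(
    StandardScaler(),
    MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=2000, random_state=42)
)
model_scaled.fit(X_train, y_train)
print(f"Accuracy with scaling: {accuracy_score(y_test, model_scaled.predict(X_test)):.2f}")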
Program 8:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input, Flatten, Reshape

# Step 1: Load and Preprocess the Dataset
(X_train, _), (X_test, _) = mnist.load_data()

# Normalize the pixel values to be between 0 and 1
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.

# Flatten the 28x28 images to vectors of size 784
X_train_flat = X_train.reshape((X_train.shape[0], 28 * 28))
X_test_flat = X_test.reshape((X_test.shape[0], 28 * 28))

# Step 2: Build the Autoencoder Model
# Define the size of the encoded representations
encoding_dim = 32

# Encoder: Input -> Dense layer
input_img = Input(shape=(784,))
encoded = Dense(encoding_dim, activation='relu')(input_img)

# Decoder: Dense layer -> Output
decoded = Dense(784, activation='sigmoid')(encoded)

# Autoencoder model
autoencoder = Model(input_img, decoded)

# Encoder model
encoder = Model(input_img, encoded)

# Decoder model
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))

# Step 3: Compile and Train the Model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
history = autoencoder.fit(X_train_flat, X_train_flat,
                          epochs=50,
                          batch_size=256,
                          shuffle=True,
                          validation_data=(X_test_flat, X_test_flat))

# Step 4: Evaluate and Visualize the Model
# Encode and decode some digits
encoded_imgs = encoder.predict(X_test_flat)
decoded_imgs = decoder.predict(encoded_imgs)

# Display original and reconstructed images
n = 10  # Number of digits to display
plt.figure(figsize=(20, 4))
for i in range(n):
    # Display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(X_test[i], cmap='gray')
    plt.title("Original")
    plt.axis('off')

    # Display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    plt.title("Reconstructed")
    plt.axis('off')
plt.show()

# Plot training and validation loss
plt.figure(figsize=(8, 4))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Output:
Downloading data from [Link]
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 3s 0us/step
Epoch 1/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.3858 - val_loss: 0.1904
Epoch 2/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1791 - val_loss: 0.1524
Epoch 3/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1481 - val_loss: 0.1335
Epoch 4/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1314 - val_loss: 0.1206
Epoch 5/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1196 - val_loss: 0.1117
Epoch 6/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1115 - val_loss: 0.1060
Epoch 7/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1062 - val_loss: 0.1020
Epoch 8/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1024 - val_loss: 0.0989
Epoch 9/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0994 - val_loss: 0.0968
Epoch 10/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0976 - val_loss: 0.0953
Epoch 11/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0962 - val_loss: 0.0944
Epoch 12/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0953 - val_loss: 0.0937
Epoch 13/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0949 - val_loss: 0.0933
Epoch 14/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0944 - val_loss: 0.0930
Epoch 15/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0940 - val_loss: 0.0928
Epoch 16/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0938 - val_loss: 0.0926
Epoch 17/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0939 - val_loss: 0.0924
Epoch 18/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0936 - val_loss: 0.0924
Epoch 19/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0933 - val_loss: 0.0922
Epoch 20/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0935 - val_loss: 0.0922
Epoch 21/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0931 - val_loss: 0.0922
Epoch 22/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0932 - val_loss: 0.0920
Epoch 23/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0932 - val_loss: 0.0920
Epoch 24/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0932 - val_loss: 0.0920
Epoch 25/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0929 - val_loss: 0.0919
Epoch 26/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0929 - val_loss: 0.0918
Epoch 27/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0929 - val_loss: 0.0918
Epoch 28/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0929 - val_loss: 0.0919
Epoch 29/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0918
Epoch 30/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0929 - val_loss: 0.0918
Epoch 31/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0917
Epoch 32/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0917
Epoch 33/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0916
Epoch 34/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0917
Epoch 35/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0916
Epoch 36/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0916
Epoch 37/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0917
Epoch 38/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0916
Epoch 39/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0916
Epoch 40/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0930 - val_loss: 0.0916
Epoch 41/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0916
Epoch 42/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0925 - val_loss: 0.0916
Epoch 43/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0926 - val_loss: 0.0915
Epoch 44/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0927 - val_loss: 0.0916
Epoch 45/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0926 - val_loss: 0.0915
Epoch 46/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0925 - val_loss: 0.0915
Epoch 47/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0925 - val_loss: 0.0914
Epoch 48/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0915
Epoch 49/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0928 - val_loss: 0.0914
Epoch 50/50
235/235 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0924 - val_loss: 0.0914
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 542us/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 541us/step
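A per-image reconstruction error can also be computed from the arrays produced above. This is a small addition, not in the original listing, and assumes X_test_flat and decoded_imgs from Program 8.

# Mean squared reconstruction error for each test digit
reconstruction_error = np.mean(np.square(X_test_flat - decoded_imgs), axis=1)
print(f"Average reconstruction error: {reconstruction_error.mean():.4f}")
print(f"Hardest digit to reconstruct (index): {np.argmax(reconstruction_error)}")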
Program 7:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# Step 1: Generate Sine Wave Data
def generate_sine_wave_data(seq_length=50, n_samples=1000):
    x = np.linspace(0, 4 * np.pi, n_samples)
    y = np.sin(x)
    data = []
    labels = []
    for i in range(len(y) - seq_length):
        data.append(y[i:i + seq_length])
        labels.append(y[i + seq_length])
    data = np.array(data)
    labels = np.array(labels)
    return data, labels

# Generate data
seq_length = 50
data, labels = generate_sine_wave_data(seq_length=seq_length)

# Step 2: Prepare the Data
# Split the data into training and testing sets
split_ratio = 0.8
train_size = int(len(data) * split_ratio)
X_train, X_test = data[:train_size], data[train_size:]
y_train, y_test = labels[:train_size], labels[train_size:]

# Reshape data to fit the RNN input requirements
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))

# Step 3: Build the RNN Model
model = Sequential([
    SimpleRNN(50, activation='tanh', input_shape=(seq_length, 1)),
    Dense(1)
])

# Step 4: Compile the Model
model.compile(optimizer='adam', loss='mean_squared_error')

# Step 5: Train the Model
history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_test, y_test))

# Step 6: Evaluate the Model
y_pred = model.predict(X_test)

# Compute Mean Squared Error
mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse:.4f}')

# Step 7: Visualize the Predictions
plt.figure(figsize=(12, 6))
plt.plot(y_test, label='True Values')
plt.plot(y_pred, label='Predicted Values', linestyle='--')
plt.title('True vs Predicted Values')
plt.xlabel('Time Step')
plt.ylabel('Value')
plt.legend()
plt.show()

# Plot training & validation loss values
plt.figure(figsize=(8, 4))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Output:
24/24 ━━━━━━━━━━━━━━━━━━━━ 1s 9ms/step - loss: 0.2213 - val_loss: 0.0017
Epoch 2/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.0036 - val_loss: 0.0012
Epoch 3/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 6.7757e-04 - val_loss: 3.7516e-04
Epoch 4/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.6849e-04 - val_loss: 2.5600e-04
Epoch 5/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.0873e-04 - val_loss: 2.1173e-04
Epoch 6/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 1.6173e-04 - val_loss: 1.5872e-04
Epoch 7/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.2255e-04 - val_loss: 1.4286e-04
Epoch 8/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.0702e-04 - val_loss: 1.0614e-04
Epoch 9/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 8.8214e-05 - val_loss: 8.8537e-05
Epoch 10/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 6.7436e-05 - val_loss: 6.7614e-05
Epoch 11/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.3747e-05 - val_loss: 6.6790e-05
Epoch 12/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.5459e-05 - val_loss: 5.5463e-05
Epoch 13/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.6370e-05 - val_loss: 4.5279e-05
Epoch 14/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.5487e-05 - val_loss: 3.7736e-05
Epoch 15/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.8997e-05 - val_loss: 4.4860e-05
Epoch 16/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.6576e-05 - val_loss: 3.7858e-05
Epoch 17/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 4.1060e-05 - val_loss: 3.6781e-05
Epoch 18/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.9181e-05 - val_loss: 2.8869e-05
Epoch 19/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.5702e-05 - val_loss: 2.5037e-05
Epoch 20/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.1272e-05 - val_loss: 2.2447e-05
Epoch 21/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.8112e-05 - val_loss: 2.0459e-05
Epoch 22/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.8562e-05 - val_loss: 2.3416e-05
Epoch 23/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.5786e-05 - val_loss: 1.8817e-05
Epoch 24/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.4590e-05 - val_loss: 1.7512e-05
Epoch 25/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.6073e-05 - val_loss: 2.0035e-05
Epoch 26/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.2106e-05 - val_loss: 1.5252e-05
Epoch 27/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.4050e-05 - val_loss: 1.5563e-05
Epoch 28/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 2.0582e-05 - val_loss: 1.5430e-05
Epoch 29/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.4643e-05 - val_loss: 1.9263e-05
Epoch 30/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1752e-05 - val_loss: 1.3184e-05
Epoch 31/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1689e-05 - val_loss: 1.5831e-05
Epoch 32/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.0718e-05 - val_loss: 1.0174e-05
Epoch 33/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 8.2172e-06 - val_loss: 1.0342e-05
Epoch 34/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 9.5107e-06 - val_loss: 1.9380e-05
Epoch 35/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.3584e-05 - val_loss: 9.8318e-06
Epoch 36/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 9.1223e-06 - val_loss: 9.0486e-06
Epoch 37/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 7.2069e-06 - val_loss: 1.4025e-05
Epoch 38/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 9.0311e-06 - val_loss: 1.8827e-05
Epoch 39/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1375e-05 - val_loss: 1.6303e-05
Epoch 40/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 1.1201e-05 - val_loss: 8.3162e-06
Epoch 41/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.7165e-06 - val_loss: 6.9740e-06
Epoch 42/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 9.0723e-06 - val_loss: 6.9174e-06
Epoch 43/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.5903e-06 - val_loss: 8.7519e-06
Epoch 44/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 7.4875e-06 - val_loss: 7.3945e-06
Epoch 45/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.7357e-06 - val_loss: 6.0659e-06
Epoch 46/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.8534e-06 - val_loss: 1.3544e-05
Epoch 47/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 6.6726e-06 - val_loss: 7.7518e-06
Epoch 48/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 5.4372e-06 - val_loss: 4.8150e-06
Epoch 49/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.1051e-06 - val_loss: 6.0486e-06
Epoch 50/50
24/24 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 3.6223e-06 - val_loss: 4.5517e-06
WARNING:tensorflow:5 out of the last 320 calls to <function
TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at
0x000002A463EB4790> triggered tf.function retracing. Tracing is expensive and the excessive
number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors
with different shapes, (3) passing Python objects instead of tensors. For (1), please define your
@tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can
avoid unnecessary retracing. For (3), please refer to
[Link] and
[Link] for more details.
6/6 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step
Mean Squared Error: 0.0000
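The printed value 0.0000 is only a display artifact of the :.4f format; the final validation loss suggests the true MSE is on the order of 1e-06. Printing in scientific notation (a one-line addition, not in the original program) makes this visible:

print(f'Mean Squared Error: {mse:.2e}')   # shows the small error instead of rounding it to 0.0000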
Program 6:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.utils import to_categorical

# Step 1: Load and Preprocess the Dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape data to include a single channel (grayscale) and normalize
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype('float32') / 255
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype('float32') / 255

# One-hot encode the labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Step 2: Build the CNN Model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

# Step 3: Compile the Model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Step 4: Train the Model
history = model.fit(X_train, y_train, epochs=10, batch_size=128, validation_data=(X_test, y_test))

# Step 5: Evaluate the Model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f'Test accuracy: {test_acc:.4f}')

# Step 6: Visualize the Model's Performance
# Plot training & validation accuracy values
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# Plot training & validation loss values
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Display some predictions
predictions = model.predict(X_test)
plt.figure(figsize=(10, 10))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(X_test[i].reshape(28, 28), cmap='gray')
    plt.title(f"Pred: {np.argmax(predictions[i])}, True: {np.argmax(y_test[i])}")
    plt.axis('off')
plt.show()
Output:
469/469 ━━━━━━━━━━━━━━━━━━━━ 6s 10ms/step - accuracy: 0.8466 - loss: 0.5258 - val_accuracy:
0.9769 - val_loss: 0.0755
Epoch 2/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 11ms/step - accuracy: 0.9791 - loss: 0.0687 - val_accuracy:
0.9850 - val_loss: 0.0485
Epoch 3/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9845 - loss: 0.0491 - val_accuracy:
0.9875 - val_loss: 0.0371
Epoch 4/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9898 - loss: 0.0336 - val_accuracy:
0.9889 - val_loss: 0.0362
Epoch 5/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9915 - loss: 0.0267 - val_accuracy:
0.9888 - val_loss: 0.0362
Epoch 6/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9924 - loss: 0.0246 - val_accuracy:
0.9899 - val_loss: 0.0334
Epoch 7/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9943 - loss: 0.0192 - val_accuracy:
0.9879 - val_loss: 0.0416
Epoch 8/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 11ms/step - accuracy: 0.9955 - loss: 0.0143 - val_accuracy:
0.9901 - val_loss: 0.0315
Epoch 9/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9960 - loss: 0.0138 - val_accuracy:
0.9900 - val_loss: 0.0339
Epoch 10/10
469/469 ━━━━━━━━━━━━━━━━━━━━ 5s 10ms/step - accuracy: 0.9960 - loss: 0.0125 - val_accuracy:
0.9915 - val_loss: 0.0298
Test accuracy: 0.9915
WARNING:tensorflow:5 out of the last 13 calls to <function
TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at
0x000002A4007B4670> triggered tf.function retracing. Tracing is expensive and the excessive
number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors
with different shapes, (3) passing Python objects instead of tensors. For (1), please define your
@tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can
avoid unnecessary retracing. For (3), please refer to
[Link] and
[Link] for more details.
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
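If the trained CNN is to be reused later, it can be saved and reloaded with the standard Keras API; this is an optional addition, not part of Program 6 itself.

# Save the trained model to a single file and restore it
model.save('mnist_cnn.keras')
from tensorflow.keras.models import load_model
restored = load_model('mnist_cnn.keras')
print(restored.evaluate(X_test, y_test, verbose=0))   # [loss, accuracy] of the restored model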
Program 4:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report, accuracy_score

# Step 1: Load and Preprocess the Dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Normalize the pixel values to be between 0 and 1
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255

# Flatten the images to 1D vectors
X_train = X_train.reshape((X_train.shape[0], -1))
X_test = X_test.reshape((X_test.shape[0], -1))

# One-hot encode the labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Step 2: Build the Neural Network with Regularization and Model Enhancements
model = Sequential([
    Dense(256, activation='relu', input_shape=(784,), kernel_regularizer=l2(0.001)),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu', kernel_regularizer=l1(0.001)),
    BatchNormalization(),
    Dropout(0.5),
    Dense(64, activation='relu', kernel_regularizer=l2(0.001)),
    BatchNormalization(),
    Dropout(0.3),
    Dense(10, activation='softmax')
])

# Step 3: Compile the Model
model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy',
              metrics=['accuracy'])

# Step 4: Set up Early Stopping
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

# Step 5: Train the Model
history = model.fit(X_train, y_train, epochs=50, batch_size=128,
                    validation_data=(X_test, y_test), callbacks=[early_stopping])

# Step 6: Evaluate the Model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f'Test accuracy: {test_acc:.4f}')

# Classification report for detailed evaluation
y_pred = model.predict(X_test)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = np.argmax(y_test, axis=1)
print("\nClassification Report:")
print(classification_report(y_true, y_pred_classes))

# Step 7: Visualize Training History
# Plot training & validation accuracy values
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# Plot training & validation loss values
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Output:
469/469 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.6723 - loss: 3.0517 - val_accuracy:
0.9332 - val_loss: 1.0377
Epoch 2/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9115 - loss: 0.9734 - val_accuracy:
0.9558 - val_loss: 0.6200
Epoch 3/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9234 - loss: 0.7068 - val_accuracy:
0.9594 - val_loss: 0.5285
Epoch 4/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9297 - loss: 0.6141 - val_accuracy:
0.9665 - val_loss: 0.4667
Epoch 5/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9348 - loss: 0.5682 - val_accuracy:
0.9611 - val_loss: 0.4542
Epoch 6/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9356 - loss: 0.5450 - val_accuracy:
0.9664 - val_loss: 0.4347
Epoch 7/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9388 - loss: 0.5166 - val_accuracy:
0.9677 - val_loss: 0.4123
Epoch 8/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9399 - loss: 0.5014 - val_accuracy:
0.9666 - val_loss: 0.4008
Epoch 9/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9413 - loss: 0.4942 - val_accuracy:
0.9671 - val_loss: 0.3968
Epoch 10/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9413 - loss: 0.4819 - val_accuracy:
0.9633 - val_loss: 0.4025
Epoch 11/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9408 - loss: 0.4817 - val_accuracy:
0.9604 - val_loss: 0.4079
Epoch 12/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9429 - loss: 0.4678 - val_accuracy:
0.9667 - val_loss: 0.3829
Epoch 13/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9417 - loss: 0.4644 - val_accuracy:
0.9668 - val_loss: 0.3823
Epoch 14/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9405 - loss: 0.4673 - val_accuracy:
0.9666 - val_loss: 0.3845
Epoch 15/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9422 - loss: 0.4629 - val_accuracy:
0.9653 - val_loss: 0.3773
Epoch 16/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9441 - loss: 0.4554 - val_accuracy:
0.9671 - val_loss: 0.3689
Epoch 17/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9445 - loss: 0.4488 - val_accuracy:
0.9679 - val_loss: 0.3698
Epoch 18/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9434 - loss: 0.4545 - val_accuracy:
0.9661 - val_loss: 0.3788
Epoch 19/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9417 - loss: 0.4597 - val_accuracy:
0.9646 - val_loss: 0.3805
Epoch 20/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9411 - loss: 0.4590 - val_accuracy:
0.9669 - val_loss: 0.3750
Epoch 21/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9411 - loss: 0.4576 - val_accuracy:
0.9689 - val_loss: 0.3582
Epoch 22/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9474 - loss: 0.4358 - val_accuracy:
0.9665 - val_loss: 0.3708
Epoch 23/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9427 - loss: 0.4514 - val_accuracy:
0.9667 - val_loss: 0.3659
Epoch 24/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9437 - loss: 0.4487 - val_accuracy:
0.9674 - val_loss: 0.3605
Epoch 25/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9463 - loss: 0.4357 - val_accuracy:
0.9671 - val_loss: 0.3677
Epoch 26/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.9435 - loss: 0.4481 - val_accuracy:
0.9649 - val_loss: 0.3766
Test accuracy: 0.9689
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step
Classification Report:
              precision    recall  f1-score   support

           0       0.98      0.98      0.98       980
           1       0.98      0.99      0.99      1135
           2       0.97      0.97      0.97      1032
           3       0.96      0.97      0.96      1010
           4       0.97      0.98      0.97       982
           5       0.97      0.95      0.96       892
           6       0.96      0.97      0.97       958
           7       0.96      0.97      0.96      1028
           8       0.97      0.95      0.96       974
           9       0.98      0.95      0.96      1009

    accuracy                           0.97     10000
   macro avg       0.97      0.97      0.97     10000
weighted avg       0.97      0.97      0.97     10000
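Training stopped at epoch 26 of the allowed 50 because EarlyStopping saw no val_loss improvement for 5 consecutive epochs and restored the best weights. The actual stopping point can be read back from the objects created in Program 4; a brief sketch, assuming history and early_stopping as defined above:

# Number of epochs that actually ran before early stopping triggered
print(f"Epochs run: {len(history.history['loss'])}")
# 0-based epoch index at which training stopped (0 if early stopping never fired)
print(f"Stopped at epoch: {early_stopping.stopped_epoch}")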