Neural Network Implementations

#1. Implementation of Perceptron Algorithm for AND Logic Gate with 2-bit Binary Input

import numpy as np

# Input and target output for AND gate
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 0, 1])

# Initialize weights, bias, and learning rate
weights = np.zeros(2)
bias = 0
learning_rate = 0.1
epochs = 10

# Training the perceptron
for _ in range(epochs):
    for i in range(len(X)):
        y_pred = 1 if np.dot(X[i], weights) + bias > 0 else 0
        error = y[i] - y_pred
        weights += learning_rate * error * X[i]
        bias += learning_rate * error

# Testing the perceptron
for x in X:
    output = 1 if np.dot(x, weights) + bias > 0 else 0
    print(f"Input: {x}, Predicted Output: {output}")

#2. Design and implement a Madaline network to solve the XOR problem.
Simulate the network for non-linearly separable data and demonstrate
the results.

import numpy as np

# XOR input and target output
X = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
y = np.array([0, 1, 1, 0])

# Manually define weights and biases after training
weights_hidden = np.array([[1, -1], [-1, 1]])  # Pre-trained weights for hidden layer
bias_hidden = np.array([-0.5, -0.5])           # Pre-trained biases for hidden layer
weights_output = np.array([1, 1])              # Pre-trained weights for output layer
bias_output = -0.5                             # Pre-trained bias for output layer

# Activation function
def activation(x):
    return np.where(x > 0, 1, 0)

# Testing the XOR logic
print("Testing XOR logic:")
for x in X:
    hidden = activation(np.dot(x, weights_hidden.T) + bias_hidden)  # Hidden layer output
    output = activation(np.dot(hidden, weights_output) + bias_output)  # Final output
    print(f"Input: {x}, Predicted Output: {output}")

#3. Write a program to implement a Multi-Layer Perceptron using the
backpropagation algorithm. Train the MLP on a classification problem,
using gradient descent for weight updates.

import numpy as np

# Sigmoid and its derivative (the derivative takes the sigmoid output, not the raw input)
sigmoid = lambda x: 1 / (1 + np.exp(-x))
sigmoid_derivative = lambda x: x * (1 - x)

# XOR inputs and outputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Initialize weights and biases
np.random.seed(0)
w1, w2 = np.random.rand(2, 2), np.random.rand(2, 1)
b1, b2 = np.random.rand(1, 2), np.random.rand(1, 1)

# Train MLP
for _ in range(10000):
    h = sigmoid(np.dot(X, w1) + b1)                # Hidden layer
    o = sigmoid(np.dot(h, w2) + b2)                # Output layer
    d2 = (y - o) * sigmoid_derivative(o)           # Output delta
    d1 = np.dot(d2, w2.T) * sigmoid_derivative(h)  # Hidden delta
    w2 += np.dot(h.T, d2) * 0.1                    # Update weights
    b2 += np.sum(d2, axis=0, keepdims=True) * 0.1
    w1 += np.dot(X.T, d1) * 0.1
    b1 += np.sum(d1, axis=0, keepdims=True) * 0.1

# Test MLP
for x in X:
    h = sigmoid(np.dot(x, w1) + b1)
    o = sigmoid(np.dot(h, w2) + b2)
    print(f"Input: {x}, Output: {o.round()}")

#4. Write a program to implement a Radial Basis Function Network (RBFN)
for pattern classification. Use Gaussian radial basis functions, and
train the network using supervised learning techniques.

import numpy as np

# Gaussian RBF
def rbf(x, center, sigma):
    return np.exp(-np.linalg.norm(x - center) ** 2 / (2 * sigma ** 2))

# Train the RBF Network
def train_rbfn(X, y, centers, sigma):
    G = np.zeros((X.shape[0], len(centers)))
    for i, x in enumerate(X):
        for j, c in enumerate(centers):
            G[i, j] = rbf(x, c, sigma)
    weights = np.linalg.pinv(G).dot(y)  # Calculate weights using the pseudoinverse
    return weights

# Predict with RBF Network
def predict_rbfn(X, centers, weights, sigma):
    G = np.zeros((X.shape[0], len(centers)))
    for i, x in enumerate(X):
        for j, c in enumerate(centers):
            G[i, j] = rbf(x, c, sigma)
    return np.round(G.dot(weights))

# XOR problem
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])

# Set RBF centers (using training points as centers) and sigma
centers = X
sigma = 1.0

# Train and test RBF Network
weights = train_rbfn(X, y, centers, sigma)
predictions = predict_rbfn(X, centers, weights, sigma)

# Output results
print("Input:\n", X)
print("Predicted Output:\n", predictions)

#5. Implement an autoassociative memory network to store and recall
patterns. Write a program that uses Hebbian learning to train the
network and test its performance with noisy inputs.

import numpy as np

# Hebbian learning
def train_hebbian(patterns):
    return np.dot(patterns.T, patterns)

# Recall a pattern
def recall(pattern, weights):
    return np.sign(np.dot(pattern, weights))

# Patterns to store
patterns = np.array([
    [1, -1, 1, -1],
    [-1, 1, -1, 1]
])

# Train the network
weights = train_hebbian(patterns)

# Test with noisy input
noisy_input = np.array([1, -1, 1, 1])  # Slightly noisy version of the first pattern
recalled = recall(noisy_input, weights)

print("Noisy Input:", noisy_input)


print("Recalled Pattern:", recalled)

#6. Implement a heteroassociative memory network that maps input
patterns to output patterns. Train the network using Hebbian learning
and test its performance by introducing noise in the input.

import numpy as np

# Hebbian learning for heteroassociative memory
def train_heteroassociative(input_patterns, output_patterns):
    return np.dot(input_patterns.T, output_patterns)

# Recall a pattern
def recall(input_pattern, weights):
    return np.dot(input_pattern, weights)

# Input and output patterns
input_patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])
output_patterns = np.array([
    [1, 1],
    [-1, -1]
])

# Train the network
weights = train_heteroassociative(input_patterns, output_patterns)

# Test with noisy input
noisy_input = np.array([1, -1, -1])  # Noisy version of the first pattern
recalled_output = recall(noisy_input, weights)

print("Noisy Input:", noisy_input)


print("Recalled Output:", recalled_output)

#7. Write a program to implement a Bidirectional Associative Memory
(BAM). Train the network to recall patterns in both directions, i.e.,
input-to-output and output-to-input, and test its recall accuracy.

import numpy as np

# Hebbian learning for BAM
def train_bam(input_patterns, output_patterns):
    # Compute weights for input-to-output and output-to-input directions
    w_in_out = np.dot(input_patterns.T, output_patterns)
    w_out_in = np.dot(output_patterns.T, input_patterns)
    return w_in_out, w_out_in

# Recall from input to output
def recall_input_to_output(input_pattern, w_in_out):
    return np.sign(np.dot(input_pattern, w_in_out))

# Recall from output to input
def recall_output_to_input(output_pattern, w_out_in):
    return np.sign(np.dot(output_pattern, w_out_in))

# Input and output patterns
input_patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])
output_patterns = np.array([
    [1, 1],
    [-1, -1]
])

# Train the BAM network
w_in_out, w_out_in = train_bam(input_patterns, output_patterns)

# Test recall accuracy
input_test = np.array([1, -1, 1])  # Test input pattern
output_test = np.array([1, 1])     # Test output pattern

recalled_output = recall_input_to_output(input_test, w_in_out)
recalled_input = recall_output_to_input(output_test, w_out_in)

print("Input Test:", input_test)


print("Recalled Output:", recalled_output)
print("Output Test:", output_test)
print("Recalled Input:", recalled_input)

#8. Implement a Hopfield network for pattern storage and recall. Write
a program that demonstrates the energy minimization process and the
ability to recall patterns even in the presence of noise.

import numpy as np

# Hopfield Network class
class HopfieldNetwork:
    def __init__(self, num_neurons):
        self.num_neurons = num_neurons
        self.weights = np.zeros((num_neurons, num_neurons))

    # Train the network using Hebbian learning
    def train(self, patterns):
        for pattern in patterns:
            self.weights += np.outer(pattern, pattern)
        np.fill_diagonal(self.weights, 0)  # No self-connections

    # Recall a pattern with noise via asynchronous, energy-minimizing updates
    def recall(self, noisy_pattern, iterations=5):
        pattern = noisy_pattern.copy()  # Copy so the caller's array is not modified in place
        for _ in range(iterations):
            for i in range(self.num_neurons):
                # Update each neuron based on the weighted sum of the others
                sum_input = np.dot(self.weights[i], pattern)
                pattern[i] = 1 if sum_input >= 0 else -1
        return pattern

# Define stored patterns
patterns = np.array([
    [1, -1, 1],
    [-1, 1, -1]
])

# Create the Hopfield network and train it
hopfield = HopfieldNetwork(num_neurons=3)
hopfield.train(patterns)

# Test with noisy input (introducing noise)
noisy_input = np.array([1, -1, -1])  # Noisy version of the first pattern

# Recall the pattern from the noisy input
recalled_pattern = hopfield.recall(noisy_input)
print("Noisy Input:", noisy_input)
print("Recalled Pattern:", recalled_pattern)

#9. Implement a fixed-weight competitive network where neurons compete
to be activated. Simulate the network for clustering input patterns and
demonstrate the winner-takes-all strategy.

import numpy as np

# Competitive Network
class CompetitiveNetwork:
    def __init__(self, num_neurons, input_dim):
        self.num_neurons = num_neurons
        self.weights = np.random.rand(num_neurons, input_dim)

    # Find the winner neuron based on the minimum distance to the input
    def winner_takes_all(self, input_pattern):
        distances = np.linalg.norm(self.weights - input_pattern, axis=1)
        winner = np.argmin(distances)
        return winner

# Input patterns for clustering
inputs = np.array([
    [1, 1],
    [1, -1],
    [-1, 1],
    [-1, -1]
])

# Create the competitive network (weights are fixed at their random initial values)
network = CompetitiveNetwork(num_neurons=2, input_dim=2)

# Simulate the network for clustering
for input_pattern in inputs:
    winner = network.winner_takes_all(input_pattern)
    print(f"Input: {input_pattern}, Winner Neuron: {winner}")

#10. Implement a Recurrent Neural Network (RNN) for predicting the
next character in a string "HELLO." Evaluate the model's performance
based on accuracy and loss metrics after training.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.utils import to_categorical

# Prepare data
data = "HELLO"
chars = sorted(set(data))
char_to_int = {c: i for i, c in enumerate(chars)}
dataX = [char_to_int[data[i]] for i in range(len(data)-1)]
dataY = [char_to_int[data[i+1]] for i in range(len(data)-1)]

X = np.reshape(dataX, (len(dataX), 1, 1)) / len(chars)
y = to_categorical(dataY)

# Build the model
model = Sequential([
    LSTM(256, input_shape=(X.shape[1], X.shape[2])),
    Dropout(0.2),
    Dense(len(chars), activation='softmax')
])

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Train and evaluate the model
model.fit(X, y, epochs=1000, batch_size=1, verbose=0)
loss, accuracy = model.evaluate(X, y, verbose=0)

print(f"Accuracy: {accuracy*100:.2f}%, Loss: {loss:.2f}")

# Predict the next character after the last input character
prediction_input = np.reshape(dataX[-1], (1, 1, 1)) / len(chars)
predicted_prob = model.predict(prediction_input, verbose=0)
predicted_idx = np.argmax(predicted_prob)
predicted_char = list(char_to_int.keys())[list(char_to_int.values()).index(predicted_idx)]
print(f"Predicted next character: {predicted_char}")
