Logistic regression
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Toy dataset: five 2-D points with binary labels
X = np.array([[1.2, 2.3],
              [2.8, 3.9],
              [0.4, 1.8],
              [3.0, 2.5],
              [2.5, 1.0]])
y = np.array([0, 1, 0, 1, 0])

# Hold out 20% of the data for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=60)

logistic_regression_model = LogisticRegression()
logistic_regression_model.fit(X_train, y_train)

y_pred = logistic_regression_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")
Accuracy: 100.00%
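With only one held-out point, 100% accuracy says little on its own. As a quick follow-up sketch (new code, reusing the fitted model and X_test from the cell above): the learned coefficients and intercept fully determine the decision boundary, and the predicted probability is just the sigmoid of the decision function.

# Sketch: recover predict_proba by hand from the fitted parameters
# (assumes the cell above has run).
print("Coefficients (w):", logistic_regression_model.coef_)
print("Intercept (b):", logistic_regression_model.intercept_)
z = logistic_regression_model.decision_function(X_test)
print("Manual sigmoid :", 1 / (1 + np.exp(-z)))
print("predict_proba  :", logistic_regression_model.predict_proba(X_test)[:, 1])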
Back-propagation
import numpy as np

X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X / np.amax(X, axis=0)  # scale each feature by its column maximum
y = y / 100                 # scale targets into [0, 1]

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def derivatives_sigmoid(x):
    # derivative of the sigmoid written in terms of its output value
    return x * (1 - x)

epoch = 5
lr = 0.1
inputlayer_neurons = 2
hiddenlayer_neurons = 3
output_neurons = 1

# Random initialization of weights and biases
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward pass
    hinp1 = np.dot(X, wh)
    hinp = hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1 = np.dot(hlayer_act, wout)
    outinp = outinp1 + bout
    output = sigmoid(outinp)
    # backward pass: propagate the error through the network
    EO = y - output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    # weight updates scaled by the learning rate
    # (this lab version leaves the biases bh and bout unchanged)
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
print ("-----------Epoch-", i+1, "Starts----------------")
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" ,output)
print ("-----------Epoch-", i+1, "Ends----------------\n")
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" ,output)
-----------Epoch- 1 Starts----------
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.76779577]
[0.75723532]
[0.76896431]]
-----------Epoch- 1 Ends----------
-----------Epoch- 2 Starts----------
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.76986431]
[0.75919163]
[0.77101865]]
-----------Epoch- 2 Ends----------
-----------Epoch- 3 Starts----------
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.77187568]
[0.76109505]
[0.77301617]]
-----------Epoch- 3 Ends----------
-----------Epoch- 4 Starts----------
Input:
[[0.66666667 1. ]
[0.33333333 0.55555556]
[1. 0.66666667]]
Actual Output:
[[0.92]
[0.86]
[0.89]]
Predicted Output:
[[0.77383217]
[0.76294764]
[0.77495914]]
Activation functions
import numpy as np

def binary_step(x, threshold):
    return 1 if x >= threshold else 0

def linear_activation(x):
    return x

def sigmoid_activation(x):
    return 1 / (1 + np.exp(-x))

def tanh_activation(x):
    return np.tanh(x)
def relu_activation(x):
    return max(0, x)
input_value = 2.0
threshold = 0.5
binary_step_result = binary_step(input_value, threshold)
linear_activation_result = linear_activation(input_value)
sigmoid_activation_result = sigmoid_activation(input_value)
tanh_activation_result = tanh_activation(input_value)
relu_activation_result = relu_activation(input_value)
print("Binary Step Function:", binary_step_result)
print("Linear Activation Function:", linear_activation_result)
print("Sigmoid Activation Function:", sigmoid_activation_result)
print("Tanh Activation Function:", tanh_activation_result)
print("ReLU Activation Function:", relu_activation_result)
Binary Step Function: 1
Linear Activation Function: 2.0
Sigmoid Activation Function: 0.8807970779778823
Tanh Activation Function: 0.9640275800758169
ReLU Activation Function: 2.0
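These functions operate on one scalar at a time. A vectorized variant (a sketch in plain NumPy, not from the original cell) applies the same activations to a whole array at once:

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
print("binary step:", np.where(x >= 0.5, 1, 0))  # same 0.5 threshold as above
print("linear     :", x)
print("sigmoid    :", 1 / (1 + np.exp(-x)))
print("tanh       :", np.tanh(x))
print("relu       :", np.maximum(0, x))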
Support vector machine
import numpy as np
from sklearn import datasets
from sklearn import svm
import matplotlib.pyplot as plt

# Synthetic 2-D binary classification problem
X, y = datasets.make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, random_state=42)

clf = svm.SVC(kernel='linear')
clf.fit(X, y)

# Plot the data, then draw the decision boundary and the two margins
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 50),
                     np.linspace(ylim[0], ylim[1], 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
            linestyles=['--', '-', '--'])
plt.show()
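A natural extension (a sketch, not in the original cell) is to highlight the support vectors, which scikit-learn exposes through the fitted model's support_vectors_ attribute:

# Sketch: overlay the support vectors on the scatter
# (assumes clf, X, y, and plt from the cell above).
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=100, facecolors='none', edgecolors='k', label='support vectors')
plt.legend()
plt.show()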
Logistic regression (gradient descent from scratch)
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
# Synthetic data: the label is 1 when the two features sum to a positive value
X = np.random.randn(100, 2)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

X_train, X_test = X[:80], X[80:]
y_train, y_test = y[:80], y[80:]

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Prepend a column of ones so theta[0] acts as the bias term
theta = np.zeros(3)
X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))

learning_rate = 0.1
epochs = 1000
for epoch in range(epochs):
    # batch gradient descent on the logistic loss
    z = np.dot(X_train, theta)
    h = sigmoid(z)
    gradient = np.dot(X_train.T, (h - y_train)) / y_train.size
    theta -= learning_rate * gradient

predicted_probabilities = sigmoid(np.dot(X_test, theta))
predicted_labels = (predicted_probabilities > 0.5).astype(int)
accuracy = np.mean(predicted_labels == y_test)
print(f"Accuracy: {accuracy}")

# Decision boundary: theta[0] + theta[1]*x1 + theta[2]*x2 = 0
x_values = np.array([np.min(X[:, 0]) - 1, np.max(X[:, 0]) + 1])
y_values = -(theta[0] + theta[1] * x_values) / theta[2]
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.plot(x_values, y_values, label='Decision Boundary')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.show()
Accuracy: 1.0
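For comparison (a sketch, not part of the lab), scikit-learn's LogisticRegression fitted on the same synthetic split should reach a similar test accuracy:

# Sketch: cross-check the hand-rolled model against sklearn.
import numpy as np
from sklearn.linear_model import LogisticRegression

np.random.seed(0)
X = np.random.randn(100, 2)          # regenerate the same data as above
y = (X[:, 0] + X[:, 1] > 0).astype(int)
clf = LogisticRegression().fit(X[:80], y[:80])
print("sklearn accuracy:", clf.score(X[80:], y[80:]))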
Auto-Associative memory
import numpy as np

# Store one bipolar pattern via Hebbian learning: weight[i][j] = s[i] * s[j]
source = [-1, 1, 1, 1]
target = source
sourcet = np.transpose(source)  # transpose of the stored pattern (not used below)
weight = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
for i in range(len(source)):
    for j in range(len(source)):
        weight[i][j] = source[i] * target[j]

print("Weight Matrix:")
for row in weight:
    print(row)

# Recall: feed the stored pattern back through the weight matrix
output = [0] * len(source)
for i in range(len(source)):
    for j in range(len(source)):
        output[i] += source[j] * weight[j][i]
print("Output Matrix:", output)
is_auto_associative = all(output[i] % source[i] == 0 for i in range(len(source)))
if is_auto_associative:
print("It is auto-associative")
else:
print("It is not auto-associative")
Weight Matrix:
[1, -1, -1, -1]
[-1, 1, 1, 1]
[-1, 1, 1, 1]
[-1, 1, 1, 1]
Output Matrix: [-4, 4, 4, 4]
It is auto-associative
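The double loop above is just an outer product, so the whole network fits in two lines of NumPy (an equivalent sketch):

import numpy as np

s = np.array([-1, 1, 1, 1])
W = np.outer(s, s)   # same Hebbian weight matrix as the loops above
print(W)
print(s @ W)         # recall gives [-4, 4, 4, 4], sign-matching s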
Hetero-Associative memory
import numpy as np

# Four binary input patterns (1x4) ...
s1 = [[1, 1, 0, 0]]
s2 = [[1, 1, 1, 0]]
s3 = [[0, 0, 1, 1]]
s4 = [[0, 1, 0, 0]]
# ... and their associated target patterns (1x2)
t1 = [[1, 0]]
t2 = [[0, 1]]
t3 = [[1, 0]]
t4 = [[1, 0]]

# Hebbian learning: the weight matrix is the sum of s_i^T t_i over all pairs
a = np.transpose(s1)
b = np.transpose(s2)
c = np.transpose(s3)
d = np.transpose(s4)
S1 = np.dot(a, t1)
S2 = np.dot(b, t2)
S3 = np.dot(c, t3)
S4 = np.dot(d, t4)
x = S1 + S2 + S3 + S4
print(x)
print()

def fun(j, x):
    # recall: multiply the input pattern by the weight matrix, threshold at 1
    y = np.dot(j, x)
    y1 = y.reshape(-1)
    y11 = [None for i in range(len(y1))]
    for i in range(len(y1)):
        y11[i] = 1 if y1[i] > 1 else 0
    print(y11)
    print()

fun(s1, x)
fun(s2, x)
fun(s3, x)
fun(s4, x)
[[1 1]
[2 1]
[1 1]
[1 0]]
[1, 1]
[1, 1]
[1, 0]
[1, 0]
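A compact check (a sketch, reusing the weight matrix x from above) compares each recalled pattern with its stored target. Note from the output above that the first two recalls come back as [1, 1], so crosstalk between the stored pairs prevents exact recall of t1 and t2:

# Sketch: verify each association (assumes s1..s4, t1..t4, x, and np above).
for s, t in [(s1, t1), (s2, t2), (s3, t3), (s4, t4)]:
    recalled = [1 if v > 1 else 0 for v in np.dot(s, x).reshape(-1)]
    print(recalled, "target:", t[0], "match:", recalled == t[0])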
Bi-directional associative memory
import numpy as np

# Set A: four bipolar input patterns (6x1 column vectors)
x1 = np.array([1, 1, 1, 1, 1, 1]).reshape(6, 1)
x2 = np.array([-1, -1, -1, -1, -1, -1]).reshape(6, 1)
x3 = np.array([1, 1, -1, -1, 1, 1]).reshape(6, 1)
x4 = np.array([-1, -1, 1, 1, -1, -1]).reshape(6, 1)
# Set B: the associated target patterns (3x1 column vectors)
y1 = np.array([1, 1, 1]).reshape(3, 1)
y2 = np.array([-1, -1, -1]).reshape(3, 1)
y3 = np.array([1, -1, 1]).reshape(3, 1)
y4 = np.array([-1, 1, -1]).reshape(3, 1)
inputSet = np.concatenate((x1, x2, x3, x4), axis=1)
targetSet = np.concatenate((y1.T, y2.T, y3.T, y4.T), axis=0)
print(inputSet)  # printed first in the output below

print("\nWeight matrix:")
# Hebbian weight matrix: the sum of x_i y_i^T, computed as one matrix product
weight = np.dot(inputSet, targetSet)
print(weight)
print("\n-------------------------------------------------")
print("\nTesting for input patterns: Set A")

def testInputs(x, weight):
    # forward pass: y = W^T x, then bipolar thresholding
    y = np.dot(weight.T, x)
    y[y < 0] = -1
    y[y >= 0] = 1
    return np.array(y)

print("\nOutput of input pattern 1")
print(testInputs(x1, weight))
print("\nOutput of input pattern 2")
print(testInputs(x2, weight))
print("\nOutput of input pattern 3")
print(testInputs(x3, weight))
print("\nOutput of input pattern 4")
print(testInputs(x4, weight))

print("\nTesting for target patterns: Set B")

def testTargets(y, weight):
    # backward pass: x = W y, then bipolar thresholding
    x = np.dot(weight, y)
    x[x <= 0] = -1
    x[x > 0] = 1
    return np.array(x)

print("\nOutput of target pattern 1")
print(testTargets(y1, weight))
print("\nOutput of target pattern 2")
print(testTargets(y2, weight))
print("\nOutput of target pattern 3")
print(testTargets(y3, weight))
print("\nOutput of target pattern 4")
print(testTargets(y4, weight))

def is_bam_model(inputSet, targetSet, weight):
    # a BAM must recall every stored pattern exactly in both directions
    input_patterns_correct = np.all(inputSet == testTargets(targetSet.T, weight))
    target_patterns_correct = np.all(targetSet.T == testInputs(inputSet, weight))
    return input_patterns_correct and target_patterns_correct

if is_bam_model(inputSet, targetSet, weight):
    print("\nIt is a BAM model.")
else:
    print("\nIt is not a BAM model.")
[[ 1 -1 1 -1]
[ 1 -1 1 -1]
[ 1 -1 -1 1]
[ 1 -1 -1 1]
[ 1 -1 1 -1]
[ 1 -1 1 -1]]
Weight matrix:
[[4 0 4]
[4 0 4]
[0 4 0]
[0 4 0]
[4 0 4]
[4 0 4]]
Testing for input patterns: Set A
Output of input pattern 1
[[1]
[1]
[1]]
Output of input pattern 2
[[-1]
[-1]
[-1]]
Output of input pattern 3
[[ 1]
[-1]
[ 1]]
Output of input pattern 4
[[-1]
[ 1]
[-1]]
Testing for target patterns: Set B
Output of target pattern 1
[[1]
[1]
[1]
[1]
[1]
[1]]
Output of target pattern 2
[[-1]
[-1]
[-1]
[-1]
[-1]
[-1]]
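Beyond exact one-shot recall, a BAM is usually exercised by iterating between the two layers until the states stabilize; starting from a corrupted input, one forward/backward pass is often enough to restore the stored pattern. A minimal sketch (assumes weight, x1, testInputs, testTargets, and np from above):

# Sketch: recall x1 from a noisy copy with one forward/backward pass.
noisy = x1.copy()
noisy[0, 0] = -1                       # flip one bit of the stored pattern
y = testInputs(noisy, weight)          # forward: noisy x -> y layer
x_recalled = testTargets(y, weight)    # backward: y -> x layer
print(np.array_equal(x_recalled, x1))  # True: the pattern is restored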