Experiment 1:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
from scipy.optimize import minimize

# Degree-4 polynomial model
def y1(x, w0, w1, w2, w3, w4):
    return w0 + w1*x + w2*x**2 + w3*x**3 + w4*x**4

# Target function to approximate
def y2(x):
    return 45 * np.sin(x)

# Sum-of-squared-errors objective; w is the vector of coefficients
def error(w, x, y):
    return np.sum((y1(x, *w) - y)**2)

x = np.linspace(-np.pi, np.pi, 500)
y = y2(x)
w0 = np.zeros(5)  # initial guess for the coefficients
res = minimize(error, w0, args=(x, y))
w = res.x
plt.plot(x, y, 'b', label='y2(x) = 45 * sin(x)')
plt.plot(x, y1(x, *w), 'r', label='y1(x) = w0 + w1*x + w2*x^2 + w3*x^3 + w4*x^4')
plt.legend()
plt.show()
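Because 45*sin(x) is an odd function, a good quartic fit should drive the even coefficients w0, w2, w4 toward zero, leaving w1 and w3 to carry the approximation (compare the Taylor expansion 45*sin(x) ≈ 45x - 7.5x^3, though the least-squares coefficients over [-pi, pi] will not match it exactly). A quick check, assuming the script above has run:

# Inspect the fitted coefficients and the final objective value.
print("fitted coefficients:", np.round(w, 3))
print("sum of squared errors:", res.fun)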
Experiment 2:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.linear_model import LinearRegression
# Define the data points
x = np.array([10, 5, 20, 15, 8]).reshape(-1, 1)
y = np.array([30500, 58000, 14900, 20400, 37000])
# Fit the linear regression model
reg = LinearRegression().fit(x, y)
# Predict the price of a car that is 12 years old
price_pred = reg.predict(np.array([12]).reshape(-1, 1))
print("The predicted price of a car that is 12 years old is: Rs", price_pred[0])
# Plot the data points and the fitted line
plt.scatter(x, y, color='blue')
plt.plot(x, reg.predict(x), color='red', linewidth=2)
plt.xlabel("Age of Car (Years)")
plt.ylabel("Price (Rs)")
plt.title("Linear Regression Model")
plt.show()
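The fitted line is price = intercept + slope * age, so the learned parameters (standard LinearRegression attributes) give the depreciation rate directly:

# The model is price = intercept_ + coef_[0] * age; a negative slope
# means the predicted price falls by that many rupees per year of age.
print("Intercept (price at age 0):", reg.intercept_)
print("Slope (Rs per year of age):", reg.coef_[0])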
Experiment 3:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Fit the decision tree classifier model to the training data
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
# Predict the target values for the test data
y_pred = clf.predict(X_test)
# Print the training data
print("Training data:")
print(X_train)
# Print the decision tree model
print("\nDecision Tree Model:")
tree.plot_tree(clf)
plt.show()
# Demonstrate prediction using the model
print("\nPrediction using the model:")
print(y_pred)
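To see how well the tree generalises, compare the predictions with the held-out labels (re-using the split above):

# Evaluate the classifier on the test set.
from sklearn.metrics import accuracy_score
print("True labels:   ", y_test)
print("Test accuracy: ", accuracy_score(y_test, y_pred))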
Experiment 4:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Fit the KNN model to the training data
knn = KNeighborsClassifier(n_neighbors=10)
knn.fit(X_train, y_train)
# Predict the target values for the test data
y_pred = knn.predict(X_test)
# Calculate the accuracy, precision, and recall of the model
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred, average='weighted')
rec = recall_score(y_test, y_pred, average='weighted')
print("Accuracy: ", acc)
print("Precision: ", prec)
print("Recall: ", rec)
Experiment 5:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris

iris = load_iris()
# Select the first 50 samples each of setosa and versicolor, using
# sepal length (column 0) and petal length (column 2) to match the axis labels
X = np.concatenate((iris.data[:50, [0, 2]], iris.data[50:100, [0, 2]]), axis=0)
y = np.concatenate((np.ones(50), -np.ones(50)))
# Plot the input scatter plot
plt.scatter(X[:50, 0], X[:50, 1], color='blue', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='red', label='versicolor')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend()
plt.show()

def perceptron(X, y, epochs, lr):
    n_samples, n_features = X.shape
    # Initialize weights and bias to zeros
    w = np.zeros(n_features)
    b = 0
    for epoch in range(epochs):
        for i in range(n_samples):
            # Update on every misclassified (or boundary) sample
            if y[i] * (np.dot(X[i], w) + b) <= 0:
                w += lr * y[i] * X[i]
                b += lr * y[i]
    return w, b

# Train the perceptron model
w, b = perceptron(X, y, epochs=100, lr=0.1)
# Plot the decision boundary w[0]*x + w[1]*y + b = 0 over the data
plt.scatter(X[:50, 0], X[:50, 1], color='blue', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='red', label='versicolor')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend()
x_ = np.linspace(4, 7.5, 100)
y_ = -(w[0]/w[1]) * x_ - b/w[1]
plt.plot(x_, y_, 'k--')
plt.xlim([4, 7.5])
plt.ylim([0, 7])
plt.show()
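As a quick check (re-using w and b from the training step), these two classes are linearly separable, so the learned separator should classify the training data perfectly:

# Training accuracy of the learned linear separator.
preds = np.sign(X @ w + b)
print("Training accuracy:", np.mean(preds == y))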
Experiment 6:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy as sp

# 2x3 grid world; the cell containing 1 (bottom-right) is the goal
grid = np.array([
    [0, 0, 0],
    [0, 0, 1],
])
actions = ["up", "down", "left", "right"]
Q = np.zeros((2, 3, 4))  # one Q-value per (row, column, action)

def q_learning(Q, grid, actions, alpha=0.1, gamma=0.9, episodes=2000):
    for episode in range(episodes):
        state = (0, 0)
        done = False
        while not done:
            # Epsilon-greedy action selection (epsilon = 0.1)
            action = np.argmax(Q[state])
            if np.random.random() < 0.1:
                action = np.random.randint(len(actions))
            next_state, reward, done = take_action(grid, state, actions[action])
            # Standard Q-learning update
            Q[state][action] += alpha * (reward + gamma * np.max(Q[next_state]) - Q[state][action])
            state = next_state
    return np.argmax(Q, axis=2)

def take_action(grid, state, action):
    i, j = state
    if action == "up":
        i -= 1
    elif action == "down":
        i += 1
    elif action == "left":
        j -= 1
    elif action == "right":
        j += 1
    # Moves that leave the grid earn no reward
    reward = grid[i][j] if 0 <= i < grid.shape[0] and 0 <= j < grid.shape[1] else 0
    done = (reward == 1)
    # Clamp the position back inside the grid
    i = max(0, min(i, grid.shape[0] - 1))
    j = max(0, min(j, grid.shape[1] - 1))
    return (i, j), reward, done

policy = q_learning(Q, grid, actions)
policy = np.array([[actions[j] for j in i] for i in policy])
policy[1][2] = 'G'  # mark the goal cell
print(policy)
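The trained Q-table can be sanity-checked by rolling out greedily from the start state (a sketch re-using take_action from above):

# Follow the greedy policy from (0, 0); the step cap guards against
# a policy that never reaches the goal.
state, done, path = (0, 0), False, [(0, 0)]
while not done and len(path) < 10:
    a = int(np.argmax(Q[state]))
    state, reward, done = take_action(grid, state, actions[a])
    path.append(state)
print("Greedy rollout:", path)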
Experiment 7:
from queue import PriorityQueue

class Node:
    def __init__(self, state, parent=None):
        self.state = state
        self.parent = parent
        self.g = 0
        self.h = 0
        self.f = 0

    def __lt__(self, other):
        return self.f < other.f

    def __eq__(self, other):
        return self.state == other.state

def best_first_search(start_state):
    open_list = PriorityQueue()
    closed_list = set()
    start_node = Node(start_state)
    start_node.h = heuristic(start_state)
    open_list.put(start_node)
    while not open_list.empty():
        current_node = open_list.get()
        current_state = current_node.state
        # Goal: the 4L jug holds exactly 2 litres
        if current_state[0] == 2:
            return get_solution_path(current_node)
        closed_list.add(current_state)
        for successor_state in get_successors(current_state):
            if successor_state not in closed_list:
                successor_node = Node(successor_state, parent=current_node)
                successor_node.g = current_node.g + 1
                successor_node.h = heuristic(successor_state)
                successor_node.f = successor_node.g + successor_node.h
                open_list.put(successor_node)
    return None
def get_successors(state):
    successors = set()
    x, y = state
    # fill the 4L jug
    successors.add((4, y))
    # fill the 3L jug
    successors.add((x, 3))
    # empty the 4L jug
    successors.add((0, y))
    # empty the 3L jug
    successors.add((x, 0))
    # pour 4L into 3L until the 3L jug is full or the 4L jug is empty
    amount = min(x, 3 - y)
    successors.add((x - amount, y + amount))
    # pour 3L into 4L until the 4L jug is full or the 3L jug is empty
    amount = min(y, 4 - x)
    successors.add((x + amount, y - amount))
    return successors
def heuristic(state):
    # Estimate of the number of steps required to reach the goal
    # state from the current state
    x, y = state
    if x == 2:
        return 0
    elif y == 0:
        return 1
    elif x == 4 and y != 2:
        return 2
    elif x == 3 and y == 3:
        return 3
    else:
        return 4

def get_solution_path(node):
    path = []
    while node is not None:
        path.append(node.state)
        node = node.parent
    return list(reversed(path))
start_state = (0, 0)
solution_path = best_first_search(start_state)
print(solution_path)
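A small check (a sketch) that the returned path is well-formed: it should start at the initial state and end with 2 litres in the 4L jug:

# Validate the solution path produced by the search.
assert solution_path is not None
assert solution_path[0] == start_state
assert solution_path[-1][0] == 2
print("Solved in", len(solution_path) - 1, "steps")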