Q1. Write a program to implement breadth-first search (BFS) in Python.
from collections import deque

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    visited.add(start)
    while queue:
        current_node = queue.popleft()
        print(current_node, end=' ')
        for neighbor in graph[current_node]:
            if neighbor not in visited:
                queue.append(neighbor)
                visited.add(neighbor)

# Example graph represented as an adjacency list
graph = {
    'A': ['B', 'C'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['B', 'H'],
    'F': ['C'],
    'G': ['C'],
    'H': ['E']
}

# Starting node for BFS
start_node = 'A'
print(f"BFS starting from node {start_node}:")
bfs(graph, start_node)
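o/p (the traversal order is deterministic here because Python dicts preserve insertion order)
BFS starting from node A:
A B C D E F G H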
Q2. Write a program to implement depth-first search (DFS) in Python.
def dfs(graph, start, visited):
    if start not in visited:
        print(f"Visiting node: {start}")
        visited.add(start)
        for neighbor in graph[start]:
            dfs(graph, neighbor, visited)

# Example graph represented as an adjacency list
graph = {
    'A': ['B', 'C'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['B', 'H'],
    'F': ['C'],
    'G': ['C'],
    'H': ['E']
}

# Starting node for DFS
start_node = 'A'
print(f"DFS starting from node {start_node}:")
visited_nodes = set()
dfs(graph, start_node, visited_nodes)
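o/p
DFS starting from node A:
Visiting node: A
Visiting node: B
Visiting node: D
Visiting node: E
Visiting node: H
Visiting node: C
Visiting node: F
Visiting node: G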
Q3. Write a program to implement a tic-tac-toe game in Python.
def print_board(board):
    for row in board:
        print(" | ".join(row))
        print("-" * 9)

def check_winner(board):
    # Check rows
    for row in board:
        if all(cell == row[0] for cell in row) and row[0] != ' ':
            return True
    # Check columns
    for col in range(3):
        if all(board[row][col] == board[0][col] for row in range(3)) and board[0][col] != ' ':
            return True
    # Check diagonals
    if board[0][0] == board[1][1] == board[2][2] != ' ' or \
       board[0][2] == board[1][1] == board[2][0] != ' ':
        return True
    return False

def is_board_full(board):
    return all(cell != ' ' for row in board for cell in row)

def tic_tac_toe():
    board = [[' ' for _ in range(3)] for _ in range(3)]
    current_player = 'X'
    while True:
        print_board(board)
        row = int(input(f"Player {current_player}, enter row (0-2): "))
        col = int(input(f"Player {current_player}, enter column (0-2): "))
        if 0 <= row < 3 and 0 <= col < 3 and board[row][col] == ' ':
            board[row][col] = current_player
            if check_winner(board):
                print_board(board)
                print(f"Player {current_player} wins!")
                break
            elif is_board_full(board):
                print_board(board)
                print("It's a tie!")
                break
            current_player = 'O' if current_player == 'X' else 'X'
        else:
            print("Invalid move. Try again.")

if __name__ == "__main__":
    tic_tac_toe()
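o/p (start of an interactive session; the board is empty until players enter moves)
  |   |  
---------
  |   |  
---------
  |   |  
---------
Player X, enter row (0-2):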
Q4. Write a program to implement the 8-puzzle problem in Python.
from queue import PriorityQueue

class PuzzleNode:
    def __init__(self, state, parent=None, move=None):
        self.state = state
        self.parent = parent
        self.move = move
        self.depth = parent.depth + 1 if parent else 0  # g(n): moves made so far
        self.cost = self.depth + self.heuristic()       # A* evaluation f(n) = g(n) + h(n)

    def heuristic(self):
        # A* heuristic h(n): sum of Manhattan distances of all tiles
        # from their goal positions.
        cost = 0
        for i in range(3):
            for j in range(3):
                if self.state[i][j] != 0:
                    row, col = divmod(self.state[i][j] - 1, 3)
                    cost += abs(row - i) + abs(col - j)
        return cost

    def __lt__(self, other):
        return self.cost < other.cost

def print_board(board):
    for row in board:
        print(" ".join(map(str, row)))
    print()

def get_blank_position(board):
    for i in range(3):
        for j in range(3):
            if board[i][j] == 0:
                return i, j

def is_goal_state(board):
    return board == [[1, 2, 3], [4, 5, 6], [7, 8, 0]]

def generate_neighbors(node):
    i, j = get_blank_position(node.state)
    neighbors = []
    for move_i, move_j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
        new_i, new_j = i + move_i, j + move_j
        if 0 <= new_i < 3 and 0 <= new_j < 3:
            new_state = [row[:] for row in node.state]
            new_state[i][j], new_state[new_i][new_j] = new_state[new_i][new_j], new_state[i][j]
            neighbors.append(PuzzleNode(new_state, node, (i, j)))
    return neighbors

def trace_solution(node):
    # Walk back through the parent links to recover the sequence of states.
    path = []
    while node:
        path.append(node.state)
        node = node.parent
    return path[::-1]
def solve_puzzle(initial_state):
    start_node = PuzzleNode(initial_state)
    frontier = PriorityQueue()
    frontier.put(start_node)
    explored = set()
    while not frontier.empty():
        current_node = frontier.get()
        if is_goal_state(current_node.state):
            return trace_solution(current_node)
        explored.add(tuple(map(tuple, current_node.state)))
        for neighbor in generate_neighbors(current_node):
            if tuple(map(tuple, neighbor.state)) not in explored:
                frontier.put(neighbor)
                explored.add(tuple(map(tuple, neighbor.state)))
    return None
if __name__ == "__main__":
    # Example solvable initial state (you can modify this)
    initial_state = [[1, 2, 3], [4, 0, 6], [7, 5, 8]]
    print("Initial State:")
    print_board(initial_state)
    solution_path = solve_puzzle(initial_state)
    if solution_path:
        print("Solution Path:")
        for step, state in enumerate(solution_path):
            print(f"Step {step}:")
            print_board(state)
    else:
        print("No solution found.")
Q5. Write a program to implement the water jug problem in Python.
from collections import deque

class WaterJugProblem:
    def __init__(self, jug1_capacity, jug2_capacity, target_amount):
        self.jug1_capacity = jug1_capacity
        self.jug2_capacity = jug2_capacity
        self.target_amount = target_amount
        self.state = (0, 0)  # Initial state: both jugs empty

    def fill_jug1(self, state):
        return (self.jug1_capacity, state[1])

    def fill_jug2(self, state):
        return (state[0], self.jug2_capacity)

    def empty_jug1(self, state):
        return (0, state[1])

    def empty_jug2(self, state):
        return (state[0], 0)

    def pour_jug1_to_jug2(self, state):
        pour_amount = min(state[0], self.jug2_capacity - state[1])
        return (state[0] - pour_amount, state[1] + pour_amount)

    def pour_jug2_to_jug1(self, state):
        pour_amount = min(state[1], self.jug1_capacity - state[0])
        return (state[0] + pour_amount, state[1] - pour_amount)

    def successors(self, state):
        return [
            self.fill_jug1(state),
            self.fill_jug2(state),
            self.empty_jug1(state),
            self.empty_jug2(state),
            self.pour_jug1_to_jug2(state),
            self.pour_jug2_to_jug1(state),
        ]

    def is_goal_state(self, state):
        return state[0] == self.target_amount or state[1] == self.target_amount

    def solve(self):
        # Breadth-first search over (jug1, jug2) states, recording each
        # state's parent so the solution path can be reconstructed.
        parent = {self.state: None}
        frontier = deque([self.state])
        while frontier:
            current_state = frontier.popleft()
            if self.is_goal_state(current_state):
                return self.trace_solution(current_state, parent)
            for next_state in self.successors(current_state):
                if next_state not in parent:
                    parent[next_state] = current_state
                    frontier.append(next_state)
        return None

    def trace_solution(self, final_state, parent):
        # Follow the parent links back to the initial state.
        path = []
        state = final_state
        while state is not None:
            path.append(state)
            state = parent[state]
        return path[::-1]
if __name__ == "__main__":
    # Example: Jug1 capacity = 4, Jug2 capacity = 3, target amount = 2
    water_jug_problem = WaterJugProblem(4, 3, 2)
    solution_path = water_jug_problem.solve()
    if solution_path:
        print("Solution Path:")
        for state in solution_path:
            print(f"Jug1: {state[0]}, Jug2: {state[1]}")
    else:
        print("No solution found.")
Q6. Write a program to implement the travelling salesman problem in Python.
import itertools

def calculate_distance(city1, city2):
    return ((city1[0] - city2[0]) ** 2 + (city1[1] - city2[1]) ** 2) ** 0.5

def total_distance(path, cities):
    distance = 0
    for i in range(len(path) - 1):
        distance += calculate_distance(cities[path[i]], cities[path[i + 1]])
    distance += calculate_distance(cities[path[-1]], cities[path[0]])  # Return to the starting city
    return distance

def nearest_neighbor(cities):
    num_cities = len(cities)
    unvisited = set(range(1, num_cities))  # Start from city 0, so exclude it from the unvisited set
    path = [0]  # Start from city 0
    current_city = 0
    while unvisited:
        nearest_city = min(unvisited, key=lambda city: calculate_distance(cities[current_city], cities[city]))
        path.append(nearest_city)
        unvisited.remove(nearest_city)
        current_city = nearest_city
    return path

def tsp_bruteforce(cities):
    num_cities = len(cities)
    all_permutations = itertools.permutations(range(num_cities))
    min_distance = float('inf')
    best_path = None
    for permutation in all_permutations:
        distance = total_distance(permutation, cities)
        if distance < min_distance:
            min_distance = distance
            best_path = list(permutation)
    return best_path
if __name__ == "__main__":
    # Example: Cities represented as (x, y) coordinates
    cities = [(0, 0), (1, 2), (2, 4), (3, 1)]

    # Nearest Neighbor Algorithm
    nn_path = nearest_neighbor(cities)
    nn_distance = total_distance(nn_path, cities)
    print("Nearest Neighbor Algorithm:")
    print("Path:", nn_path)
    print("Total Distance:", nn_distance)

    # Brute Force Algorithm (for small numbers of cities)
    bf_path = tsp_bruteforce(cities)
    bf_distance = total_distance(bf_path, cities)
    print("\nBrute Force Algorithm:")
    print("Path:", bf_path)
    print("Total Distance:", bf_distance)
Q7. Write a program to implement the Tower of Hanoi in Python.
def tower_of_hanoi(n, source, target, auxiliary):
    if n > 0:
        # Move n-1 disks from source to auxiliary peg
        tower_of_hanoi(n - 1, source, auxiliary, target)
        # Move the nth disk from source to target peg
        print(f"Move disk {n} from {source} to {target}")
        # Move the n-1 disks from auxiliary peg to target peg
        tower_of_hanoi(n - 1, auxiliary, target, source)

if __name__ == "__main__":
    # Example: 3 disks on peg A, moving to peg C with the help of peg B
    num_disks = 3
    tower_of_hanoi(num_disks, 'A', 'C', 'B')
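o/p
Move disk 1 from A to C
Move disk 2 from A to B
Move disk 1 from C to B
Move disk 3 from A to C
Move disk 1 from B to A
Move disk 2 from B to C
Move disk 1 from A to C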
Q8. Write a program to implement the monkey and banana problem in Python.
class MonkeyBananaProblem:
    def __init__(self):
        # (row, col) grid positions; the banana hangs above (4, 4).
        self.state = {'monkey': (1, 1), 'box': (2, 2), 'banana': (4, 4), 'on_box': False}

    def is_goal_state(self, state):
        # The monkey gets the banana by standing on the box directly beneath it.
        return state['on_box'] and state['monkey'] == state['banana']

    def actions(self, state):
        possible_actions = []
        if state['monkey'] != state['box']:
            # The monkey must first walk over to the box.
            possible_actions.append('go to box')
        else:
            # Standing at the box, the monkey can push it toward the banana...
            if state['box'][0] < state['banana'][0]:
                possible_actions.append('push box down')
            elif state['box'][0] > state['banana'][0]:
                possible_actions.append('push box up')
            if state['box'][1] < state['banana'][1]:
                possible_actions.append('push box right')
            elif state['box'][1] > state['banana'][1]:
                possible_actions.append('push box left')
            # ...or climb it once it sits under the banana.
            if state['box'] == state['banana'] and not state['on_box']:
                possible_actions.append('climb box')
        return possible_actions

    def result(self, state, action):
        new_state = dict(state)
        if action == 'go to box':
            new_state['monkey'] = state['box']
        elif 'push' in action:
            direction = action.split()[-1]
            row, col = state['box']
            if direction == 'up':
                row -= 1
            elif direction == 'down':
                row += 1
            elif direction == 'left':
                col -= 1
            elif direction == 'right':
                col += 1
            # The monkey moves together with the box it is pushing.
            new_state['box'] = (row, col)
            new_state['monkey'] = (row, col)
        elif action == 'climb box':
            new_state['on_box'] = True
        return new_state
def solve_monkey_banana_problem():
    problem = MonkeyBananaProblem()
    current_state = problem.state
    print("Initial State:")
    print(current_state)
    while not problem.is_goal_state(current_state):
        possible_actions = problem.actions(current_state)
        print("\nPossible Actions:")
        for i, action in enumerate(possible_actions, 1):
            print(f"{i}. {action}")
        selected_action_index = int(input("\nEnter the number corresponding to the action to perform: ")) - 1
        selected_action = possible_actions[selected_action_index]
        current_state = problem.result(current_state, selected_action)
        print("\nCurrent State:")
        print(current_state)
    print("\nGoal State Reached! Monkey got the banana!")

if __name__ == "__main__":
    solve_monkey_banana_problem()
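o/p (start of an interactive session; the user picks each action by number)
Initial State:
{'monkey': (1, 1), 'box': (2, 2), 'banana': (4, 4), 'on_box': False}

Possible Actions:
1. go to box

Enter the number corresponding to the action to perform: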
Q9. Write a program to implement character recognition in Python.
from PIL import Image
import pytesseract  # Requires the Tesseract OCR engine to be installed

def recognize_characters(image_path):
    # Open the image file
    img = Image.open(image_path)
    # Use Tesseract OCR to recognize characters
    text = pytesseract.image_to_string(img)
    return text

if __name__ == "__main__":
    # Example: Replace 'your_image_path.png' with the path to your image file
    image_path = 'your_image_path.png'
    result = recognize_characters(image_path)
    print("Recognized Characters:")
    print(result)
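Note: pytesseract is only a wrapper. Install the Python packages with pip install pillow pytesseract, and install the Tesseract engine itself separately (e.g., apt-get install tesseract-ocr on Ubuntu). The output depends on the supplied image.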
Q10. Write a program to implement the working of a Bayesian network in Python.
class BayesianNetwork:
    def __init__(self):
        # Priors: P(I=True) = 0.7 (intelligence), P(S=True) = 0.8 (studying).
        # 'P' (performance) is a conditional probability table over its
        # parents I and S.
        self.nodes = {
            'I': 0.7,
            'S': 0.8,
            'P': {'I': {True: {'S': {True: 0.9, False: 0.2}},
                        False: {'S': {True: 0.6, False: 0.1}}}},
        }

    def update_node(self, node_name, evidence):
        if node_name not in self.nodes:
            raise ValueError(f"Node {node_name} not found in the Bayesian Network.")
        node = self.nodes[node_name]
        if isinstance(node, float):
            # Node is a basic (prior) probability
            return node
        # Node is a conditional probability table: walk down the table,
        # indexing by each parent's value from the evidence.
        table = node
        while isinstance(table, dict):
            parent = next(iter(table))
            table = table[parent][evidence[parent]]
        return table

    def query(self, query_node, evidence):
        prob_true = self.update_node(query_node, evidence)
        prob_false = 1 - prob_true
        return {'True': prob_true, 'False': prob_false}
if __name__ == "__main__":
    # Create a Bayesian Network
    bayesian_network = BayesianNetwork()
    # Define evidence (Studying=True, Intelligence=False)
    evidence = {'S': True, 'I': False}
    # Query the performance node
    query_node = 'P'
    result = bayesian_network.query(query_node, evidence)
    # Display the result
    print(f"Given evidence: {evidence}")
    print(f"Probability distribution for node {query_node}: {result}")
Q11. Linear Regression (Supervised Learning)
% Linear Regression using Gradient Descent
clear; clc;
% Data (X: input, y: output)
X = [1; 2; 3; 4; 5];
y = [2; 4; 6; 8; 10];
% Initial parameters
theta = rand(2,1);
m = length(y); % Number of training examples
X = [ones(m,1), X]; % Add intercept term
% Learning rate and iterations
alpha = 0.01;
iterations = 1000;
for iter = 1:iterations
    h = X * theta; % Hypothesis
    theta = theta - (alpha/m) * (X' * (h - y));
end
% Display results
disp('Theta values:'), disp(theta);
disp('Predicted values:'), disp(X * theta);
o/p (approximate; theta is randomly initialized, so exact values vary slightly between runs)
Theta values:
0.0
2.0
Predicted values:
2.0000
4.0000
6.0000
8.0000
10.0000
Q12. Logistic Regression (Binary Classification)
% Logistic Regression using Gradient Descent
clear; clc;
% Data (X: input, y: output)
X = [1; 2; 3; 4; 5];
y = [0; 0; 0; 1; 1];
% Initial parameters
theta = rand(2,1);
m = length(y);
X = [ones(m,1), X];
% Sigmoid function
sigmoid = @(z) 1 ./ (1 + exp(-z));
% Learning rate and iterations
alpha = 0.01;
iterations = 1000;
for iter = 1:iterations
    h = sigmoid(X * theta);
    theta = theta - (alpha/m) * (X' * (h - y));
end
% Display results
disp('Theta values:'), disp(theta);
disp('Predicted values:'), disp(round(sigmoid(X * theta)));
o/p (approximate; theta is randomly initialized, so exact values vary between runs; the predictions below follow from the theta shown)
Theta values:
-6.3
3.1
Predicted values:
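0
0
1
1
1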
Q13. K-Nearest Neighbors (KNN) Classifier
% K-Nearest Neighbors (KNN)
clear; clc;
% Training data
X_train = [1 1; 2 2; 3 3; 4 4; 5 5];
y_train = [0; 0; 1; 1; 1];
% Test data
X_test = [1.5 1.5];
% K value
K = 3;
% Euclidean distance from the test point to every training point
% (uses implicit expansion, available in R2016b and later)
distances = sqrt(sum((X_train - X_test).^2, 2));
% Get K nearest neighbors
[~, idx] = sort(distances);
nearest_neighbors = y_train(idx(1:K));
% Majority vote
prediction = mode(nearest_neighbors);
% Display prediction
disp('Predicted class:'), disp(prediction);
o/p
Predicted class:
0
Q14. Decision Tree Classifier
% Decision Tree Classifier
clear; clc;
rng('default'); % For reproducibility
% Load sample dataset
load fisheriris;
X = meas(:, 1:2);
y = species;
% Train decision tree
tree = fitctree(X, y);
% Predict new data
newData = [5.1, 3.5];
predictedLabel = predict(tree, newData);
disp('Predicted label:'), disp(predictedLabel);
view(tree, 'Mode', 'graph'); % Visualize the tree
o/p
Predicted label:
'setosa'
Q15. Neural Network (Simple Feedforward)
% Simple Neural Network
clear; clc;
% Input data (X) and output labels (y)
X = [0 0; 0 1; 1 0; 1 1];
y = [0; 1; 1; 0]; % XOR problem
% Define neural network
net = feedforwardnet(2); % 2 hidden neurons
net.divideFcn = 'dividetrain'; % Use all 4 samples for training (too few for a train/val/test split)
net = train(net, X', y');
% Test the network
output = net(X');
disp('Network output:'), disp(round(output));
o/p
Network output:
0
1
1
0
Q16. Convolutional Neural Network (Image Classification)
% Simple CNN for image classification
clear; clc;
% Load sample dataset
[XTrain, YTrain] = digitTrain4DArrayData;
layers = [
imageInputLayer([28 28 1])
convolution2dLayer(3, 8, 'Padding', 'same')
reluLayer
maxPooling2dLayer(2, 'Stride', 2)
fullyConnectedLayer(10)
softmaxLayer
classificationLayer];
options = trainingOptions('sgdm', 'MaxEpochs', 3, 'InitialLearnRate', 0.01);
% Train the CNN
net = trainNetwork(XTrain, YTrain, layers, options);
o/p
Epoch 1/3
...
Training completed.
Q17. K-Means Clustering
% K-Means Clustering
clear; clc;
% Data points
X = [1 1; 2 2; 3 3; 6 6; 7 7; 8 8];
% Number of clusters
K = 2;
% Apply K-means
[idx, C] = kmeans(X, K);
% Display results
disp('Cluster indices:'), disp(idx);
disp('Cluster centers:'), disp(C);
% Plot the clusters
figure;
gscatter(X(:,1), X(:,2), idx);
hold on;
plot(C(:,1), C(:,2), 'kx', 'MarkerSize', 15, 'LineWidth', 3);
hold off;
o/p (cluster numbering depends on the random initialization and may be swapped)
Cluster indices:
1
1
1
2
2
2
Cluster centers:
2.0000 2.0000
7.0000 7.0000
Q18. Support Vector Machine (SVM) Classifier
% SVM Classifier
clear; clc;
% Load sample dataset
load fisheriris;
X = meas(51:end, 1:2); % Only 2 classes for binary classification
y = species(51:end);
% Train SVM model
SVMModel = fitcsvm(X, y);
% Predict new data
newData = [5, 3];
predictedLabel = predict(SVMModel, newData);
disp('Predicted label:'), disp(predictedLabel);
o/p
Predicted label:
'versicolor'
Q19. Genetic Algorithm (Optimization)
% Genetic Algorithm (Optimization)
clear; clc;
% Objective function (minimizing)
objFunc = @(x) (x(1)-3)^2 + (x(2)-2)^2;
% Number of variables
nVars = 2;
% Run Genetic Algorithm
[xOpt, fval] = ga(objFunc, nVars);
disp('Optimal solution:'), disp(xOpt);
disp('Objective function value:'), disp(fval);
o/p (approximate; ga is stochastic, so results vary between runs)
Optimal solution:
3.0000 2.0000
Objective function value:
0.0000
Q20. Reinforcement Learning (Q-Learning)
% Simple Q-Learning example
clear; clc;
% Define parameters
states = 1:5; % 5 states
actions = 1:2; % 2 actions (left, right)
Q = zeros(length(states), length(actions)); % Initialize Q-table
gamma = 0.9; % Discount factor
alpha = 0.1; % Learning rate
epsilon = 0.1; % Exploration rate
% Reward matrix
R = [-1, 0, -1, -1, 10]; % Reward for each state
% Training loop over 1000 episodes
for episode = 1:1000
    state = randi(5); % Random start state
    while state ~= 5 % Until goal is reached
        if rand < epsilon
            action = randi(2); % Exploration
        else
            [~, action] = max(Q(state, :)); % Exploitation
        end
        nextState = state + (action * 2 - 3); % Action 1 moves left, action 2 moves right
        nextState = max(1, min(5, nextState)); % Keep the state within bounds
        reward = R(nextState); % Get reward
        % Update Q-value
        Q(state, action) = Q(state, action) + alpha * ...
            (reward + gamma * max(Q(nextState, :)) - Q(state, action));
        state = nextState; % Move to next state
    end
end
disp('Q-Table after training:'), disp(Q);
o/p (indicative values only; Q-learning is stochastic, so the learned table varies between runs)
Q-Table after training:
0.0000 0.0000
0.0000 0.7290
0.0000 0.8100
0.0000 0.9000
0.0000 0.0000