AIML Programs

This document collects program codes demonstrating different algorithms and applications: BFS and DFS for graph traversal, the N-Queens problem, logical expression evaluation, a rule-based chatbot, and machine learning models for classification and regression built with scikit-learn and PyTorch. Each section includes example output showcasing the functionality of the respective code.

PROGRAM CODE

from collections import deque


def bfs_with_trace(graph, start, goal):
    queue = deque([[start]])
    visited = set()
    print(f"{'Expand Node':<12} | Fringe")
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node in visited:
            continue
        visited.add(node)
        # Expand the node, then report the fringe (endpoints of queued paths)
        for neighbor in graph.get(node, []):
            if neighbor not in visited:
                new_path = list(path)
                new_path.append(neighbor)
                queue.append(new_path)
        fringe = [p[-1] for p in queue]
        print(f"{node:<12} | {', '.join(fringe)}")
        if node == goal:
            print("\nPath:", ' => '.join(path))
            return
    print("\nGoal not found.")


# Example graph (adjacency list)
graph = {
    'A': ['S', 'B'],
    'B': ['S'],
    'S': ['C', 'G'],
    'C': ['E', 'D', 'H', 'F'],
    'G': ['H', 'F', 'C'],
    'E': [],
    'D': [],
    'H': [],
    'F': []
}

bfs_with_trace(graph, 'A', 'H')  # Run the BFS
OUTPUT
Expand Node  | Fringe
A            | S, B
S            | B, C, G
B            | C, G
C            | G, E, D, H, F
G            | E, D, H, F, H, F
E            | D, H, F, H, F
D            | H, F, H, F
H            | F, H, F

Path: A => S => C => H
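
Because BFS expands the search tree level by level, the first path it finds to the goal is also a shortest path in number of edges. A minimal sketch (reusing the graph above; the helper name is ours) that returns just the path without printing the trace:

def bfs_shortest_path(graph, start, goal):
    queue = deque([[start]])
    visited = {start}
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbor in graph.get(node, []):
            if neighbor not in visited:
                visited.add(neighbor)  # mark when enqueued to avoid duplicates
                queue.append(path + [neighbor])
    return None

print(bfs_shortest_path(graph, 'A', 'H'))  # ['A', 'S', 'C', 'H']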


PROGRAM CODE
def dfs(graph, start, goal, path=None, visited=None):
    if visited is None:
        visited = set()
    if path is None:
        path = []
    visited.add(start)
    path.append(start)
    if start == goal:
        return path
    for neighbor in graph.get(start, []):
        if neighbor not in visited:
            result = dfs(graph, neighbor, goal, path.copy(), visited.copy())
            if result:
                return result
    return None


# Example graph (adjacency list)
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': ['F'],
    'F': []
}

# Run DFS
start_node = 'A'
goal_node = 'F'
path = dfs(graph, start_node, goal_node)

print("Path from", start_node, "to", goal_node, ":", path)

OUTPUT
Path from A to F : ['A', 'B', 'E', 'F']
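
The recursion above copies path and visited at each call; on deep graphs it can also hit Python's recursion limit. A minimal iterative sketch with an explicit stack (our own variant, reusing the same graph):

def dfs_iterative(graph, start, goal):
    stack = [[start]]
    visited = set()
    while stack:
        path = stack.pop()
        node = path[-1]
        if node == goal:
            return path
        if node in visited:
            continue
        visited.add(node)
        # Push neighbors in reverse so they are explored in listed order
        for neighbor in reversed(graph.get(node, [])):
            if neighbor not in visited:
                stack.append(path + [neighbor])
    return None

print(dfs_iterative(graph, 'A', 'F'))  # ['A', 'B', 'E', 'F']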
PROGRAM CODE
def is_safe(queens, row, col):
    for r in range(row):
        c = queens[r]
        # Attack if same column or same diagonal
        if c == col or abs(c - col) == abs(r - row):
            return False
    return True


def solve(n, row=0, queens=None):
    if queens is None:  # avoid the mutable-default-argument pitfall
        queens = []
    if row == n:
        print_board(queens, n)
        return True
    for col in range(n):
        if is_safe(queens, row, col):
            queens.append(col)
            if solve(n, row + 1, queens):
                return True
            queens.pop()  # backtrack
    return False


def print_board(queens, n):
    for q in queens:
        print(''.join('Q' if i == q else '.' for i in range(n)))
    print()


# Set N here
n = int(input("Enter the number of queens: "))
solve(n)
OUTPUT
Enter the number of queens: 8
Q.......
....Q...
.......Q
.....Q..
..Q.....
......Q.
.Q......
...Q....
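
The program stops at the first valid placement. A small sketch (the count_solutions helper is ours) that backtracks through every branch and counts all solutions instead:

def count_solutions(n, row=0, queens=None):
    if queens is None:
        queens = []
    if row == n:
        return 1
    total = 0
    for col in range(n):
        if is_safe(queens, row, col):
            queens.append(col)
            total += count_solutions(n, row + 1, queens)
            queens.pop()  # backtrack and try the next column
    return total

print(count_solutions(8))  # 92: the known number of 8-queens solutions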
PROGRAM CODE
import itertools


def evaluate(expr, model):
    # A bare string is a propositional variable; look up its truth value
    if isinstance(expr, str):
        return model[expr]
    op = expr[0]
    if op == 'not':
        return not evaluate(expr[1], model)
    elif op == 'and':
        return evaluate(expr[1], model) and evaluate(expr[2], model)
    elif op == 'or':
        return evaluate(expr[1], model) or evaluate(expr[2], model)
    elif op == 'implies':
        # A -> B is equivalent to (not A) or B
        return not evaluate(expr[1], model) or evaluate(expr[2], model)


def get_variables(expr):
    if isinstance(expr, str):
        return {expr}
    elif expr[0] == 'not':
        return get_variables(expr[1])
    else:
        return get_variables(expr[1]) | get_variables(expr[2])


def truth_table(expr):
    vars = sorted(get_variables(expr))
    print("\nTruth Table for:", expr)
    print(" | ".join(vars + ['Result']))
    print("-" * (6 * len(vars) + 10))
    for values in itertools.product([False, True], repeat=len(vars)):
        model = dict(zip(vars, values))
        result = evaluate(expr, model)
        row = [str(model[v]) for v in vars] + [str(result)]
        print(" | ".join(row))


def parse_expr(s):
    try:
        return eval(s)  # note: eval runs arbitrary code; see the safer sketch below
    except Exception:
        print("Invalid expression format. Use tuples like: ('implies', ('and', 'A', 'B'), 'C')")
        exit()


# --- Main ---
print("Enter the logical formula using tuples.")
print("Supported: 'and', 'or', 'not', 'implies'")
print("Example: ('implies', ('and', 'A', 'B'), 'C')\n")

user_input = input("Enter formula: ")
formula = parse_expr(user_input)
truth_table(formula)
OUTPUT
How to enter formulas:
• ('and', 'A', 'B') → A ∧ B
• ('or', 'A', 'B') → A ∨ B
• ('not', 'A') → ¬A
• ('implies', 'A', 'B') → A → B

Enter formula: ('implies', ('and', 'A', 'B'), 'C')

Truth Table for: ('implies', ('and', 'A', 'B'), 'C')
A | B | C | Result
----------------------------
False | False | False | True
False | False | True | True
False | True | False | True
False | True | True | True
True | False | False | True
True | False | True | True
True | True | False | False
True | True | True | True
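
parse_expr relies on eval, which will execute any Python typed at the prompt. A safer sketch (our suggestion, not part of the original program) uses ast.literal_eval, which only accepts literals such as tuples and strings:

import ast

def parse_expr_safe(s):
    try:
        return ast.literal_eval(s)
    except (ValueError, SyntaxError):
        print("Invalid expression format. Use tuples like: ('implies', ('and', 'A', 'B'), 'C')")
        raise SystemExit

formula = parse_expr_safe(input("Enter formula: "))
truth_table(formula)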
PROGRAM CODE
import nltk
from nltk.chat.util import Chat, reflections
import tkinter as tk
from tkinter import scrolledtext

# Define chatbot patterns and responses
pairs = [
    (r"(hi|hello|hey)",
     ["Hello! How can I assist you with university information today?"]),
    (r"what are the library hours?",
     ["The library is open from 8 AM to 10 PM on weekdays and 10 AM to 6 PM on weekends."]),
    (r"where is the library located?",
     ["The library is located in the center of campus, near the administration building."]),
    (r"how can I access online resources?",
     ["You can access online resources via the university portal using your student login."]),
    (r"what courses are available in computer science?",
     ["Computer Science courses include Data Structures, Algorithms, AI, Web Development, and Machine Learning."]),
    (r"how do I check my class schedule?",
     ["You can check your class schedule on the student portal under 'My Timetable'."]),
    (r"who do I contact for academic help?",
     ["You can contact your academic advisor or the student help desk at [email protected]."]),
    (r"what events are happening this week?",
     ["Check the campus events calendar on the university website for this week's activities."]),
    (r"how do I join student clubs?",
     ["You can join clubs by signing up at the Student Life office or during club fairs."]),
    (r"(bye|quit|exit)",
     ["Goodbye! Feel free to ask again anytime."]),
    (r"(.*)",
     ["I'm sorry, I didn't understand that. Can you please rephrase your question?"])
]

chatbot = Chat(pairs, reflections)

# Create GUI window
def start_gui():
    def send():
        user_input = entry.get()
        chat_window.insert(tk.END, f"You: {user_input}\n")
        entry.delete(0, tk.END)
        if user_input.lower() == "quit":
            chat_window.insert(tk.END, "Chatbot: Goodbye!\n")
            root.quit()
        else:
            response = chatbot.respond(user_input)
            chat_window.insert(tk.END, f"Chatbot: {response}\n")

    root = tk.Tk()
    root.title("Chatbot")
    chat_window = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=60, height=20,
                                            font=("Arial", 12))
    chat_window.pack(padx=10, pady=10)
    entry = tk.Entry(root, width=50, font=("Arial", 12))
    entry.pack(padx=10, pady=5)
    send_button = tk.Button(root, text="Send", command=send, font=("Arial", 12))
    send_button.pack(pady=5)
    root.mainloop()

start_gui()
OUTPUT
(A Tkinter chat window with a scrollable conversation area, a text entry field, and a Send button.)
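
The pattern matching can also be exercised without the GUI. A minimal console sketch reusing the same pairs (the example questions come from the patterns above):

chatbot = Chat(pairs, reflections)
print(chatbot.respond("hello"))
# Hello! How can I assist you with university information today?
print(chatbot.respond("where is the library located?"))
# The library is located in the center of campus, near the administration building.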
PROGRAM CODE
from sklearn import datasets
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Load the Iris dataset
iris = datasets.load_iris()
X = iris.data
Y = iris.target

# Split the dataset into training and testing sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1/3, random_state=42)

# Train a Gaussian Naive Bayes classifier
model = GaussianNB()
model.fit(X_train, Y_train)

# Make predictions on the test set
model_predictions = model.predict(X_test)

# Print predictions and actual labels
print("\nPredictions:", model_predictions)
print("\nActual labels:", Y_test)

# Calculate and print the accuracy score
accuracyScore = accuracy_score(Y_test, model_predictions)
print("\nAccuracy Score:", accuracyScore)

# Create and print the confusion matrix
cm = confusion_matrix(Y_test, model_predictions)
print("\nConfusion Matrix:\n", cm)
OUTPUT
Predictions: [1 0 2 1 1 0 1 2 1 1 2 0 0 0 0 2 2 1 1 2 0 2 0 2 2 2 2 2 0 0 0 0 1 0 0 2 1 0 0 0 2 1 1 0 0 1 1 2 1 2]

Actual labels: [1 0 2 1 1 0 1 2 1 1 2 0 0 0 0 1 2 1 1 2 0 2 0 2 2 2 2 2 0 0 0 0 1 0 0 2 1 0 0 0 2 1 1 0 0 1 2 2 1 2]

Accuracy Score: 0.96

Confusion Matrix:
 [[19  0  0]
 [ 0 14  1]
 [ 0  1 15]]
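
A single train/test split can be optimistic or pessimistic by chance. A quick sketch (our addition) that checks the same classifier with 5-fold cross-validation on the full dataset:

from sklearn.model_selection import cross_val_score

scores = cross_val_score(GaussianNB(), X, Y, cv=5)
print("Cross-validation accuracy: %.2f (+/- %.2f)" % (scores.mean(), scores.std()))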
PROGRAM CODE
import numpy as np
from sklearn import datasets
import torch
import torch.nn as nn
import torch.optim as optim
import torchbnn as bnn
import matplotlib.pyplot as plt

# Load Iris dataset
dataset = datasets.load_iris()
data = dataset.data
target = dataset.target

# Convert dataset to PyTorch tensors
data_tensor = torch.from_numpy(data).float()
target_tensor = torch.from_numpy(target).long()

# Define the Bayesian Neural Network model
model = nn.Sequential(
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=4, out_features=100),
    nn.ReLU(),
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=100, out_features=3)
)

# Define the loss functions
cross_entropy_loss = nn.CrossEntropyLoss()
klloss = bnn.BKLLoss(reduction='mean', last_layer_only=False)

# Define the optimizer
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Training loop
for step in range(3000):
    # Forward pass
    models = model(data_tensor)

    # Calculate Cross Entropy and KL loss
    cross_entropy = cross_entropy_loss(models, target_tensor)
    kl = klloss(model)

    # Total cost: data fit plus a small KL regularization term
    total_cost = cross_entropy + 0.01 * kl

    # Backward pass
    optimizer.zero_grad()
    total_cost.backward()
    optimizer.step()

# Final prediction and accuracy calculation
models = model(data_tensor)
_, predicted = torch.max(models.data, 1)
correct = (predicted == target_tensor).sum()
accuracy = 100 * float(correct) / target_tensor.size(0)

# Final output
cross_entropy = cross_entropy_loss(models, target_tensor)
kl = klloss(model)
print(f"Final Accuracy: {accuracy:.2f}%")
print(f"Final CE: {cross_entropy.item():.2f}, Final KL: {kl.item():.2f}")

# Function to plot the results
def draw_graph(predicted):
    fig = plt.figure(figsize=(16, 8))
    fig_1 = fig.add_subplot(1, 2, 1)
    fig_2 = fig.add_subplot(1, 2, 2)

    # Plot the true labels
    z1_plot = fig_1.scatter(data[:, 0], data[:, 1], c=target, marker='v')
    # Plot the predicted labels
    z2_plot = fig_2.scatter(data[:, 0], data[:, 1], c=predicted)

    plt.colorbar(z1_plot, ax=fig_1)
    plt.colorbar(z2_plot, ax=fig_2)
    fig_1.set_title("REAL")
    fig_2.set_title("PREDICT")
    plt.show()

# Final predictions and plot
draw_graph(predicted)
OUTPUT

Final Accuracy: 96.67%
Final CE: 0.06, Final KL: 3.21
(Two scatter plots follow: "REAL" shows the true labels and "PREDICT" the model's predictions over the first two features.)
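
Because the BayesLinear layers resample their weights on every forward pass, predictions vary slightly between calls. A sketch (our addition, assuming the trained model above) that averages class probabilities over several stochastic passes for a more stable estimate:

with torch.no_grad():
    probs = torch.stack([torch.softmax(model(data_tensor), dim=1)
                         for _ in range(20)]).mean(dim=0)
_, predicted = torch.max(probs, 1)
accuracy = 100 * float((predicted == target_tensor).sum()) / target_tensor.size(0)
print(f"Accuracy averaged over 20 forward passes: {accuracy:.2f}%")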


PROGRAM CODE
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score

# Sample dataset
data = {
    'HouseSize': [500, 800, 1000, 1200, 1500, 1800, 2000],
    'Bedrooms': [1, 2, 2, 3, 3, 4, 4],
    'Price': [100000, 130000, 150000, 170000, 200000, 230000, 250000]
}
df = pd.DataFrame(data)

# Features and target
X = df[['HouseSize', 'Bedrooms']]
y = df['Price']

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

# Train model
model = LinearRegression()
model.fit(X_train, y_train)

# Predict with user input
try:
    size = float(input("Enter house size in square feet: "))
    bedrooms = int(input("Enter number of bedrooms: "))
    predicted_price = model.predict([[size, bedrooms]])
    print(f"\nPredicted house price: ₹{round(predicted_price[0], 2)}")
except ValueError:
    print("Invalid input. Please enter numeric values for size and bedrooms.")

# Evaluate the model (optional)
y_pred = model.predict(X_test)
print("\n--- Model Evaluation ---")
print("Mean Squared Error (MSE):", mean_squared_error(y_test, y_pred))
print("R² Score:", r2_score(y_test, y_pred))
OUTPUT

Enter house size in square feet: 1600
Enter number of bedrooms: 3

Predicted house price: ₹212386.71

--- Model Evaluation ---
Mean Squared Error (MSE): 6000000.0
R² Score: 0.99
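
The fitted line itself can be inspected through the model's attributes; a short sketch using the trained model above:

print("Coefficients:", model.coef_)    # weights for HouseSize and Bedrooms
print("Intercept:", model.intercept_)
# Predicted price = intercept + coef[0]*HouseSize + coef[1]*Bedrooms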
PROGRAM CODE
# Import libraries
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report

# Step 1: Create sample data
data = {
    'HouseSize': [500, 800, 1000, 1200, 1500, 1800, 2000],
    'Bedrooms': [1, 2, 2, 3, 3, 4, 4],
    'Expensive': [0, 0, 0, 0, 1, 1, 1]  # 0 = Not Expensive, 1 = Expensive
}
df = pd.DataFrame(data)

# Step 2: Prepare features and target
X = df[['HouseSize', 'Bedrooms']]
y = df['Expensive']

# Step 3: Split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Step 4: Train the model
model = LogisticRegression()
model.fit(X_train, y_train)

# Step 5: Predict on test set
y_pred = model.predict(X_test)

# Step 6: Evaluate the model
print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))

# Step 7: Predict for new input
size = float(input("Enter house size in sqft: "))
bedrooms = int(input("Enter number of bedrooms: "))
prediction = model.predict([[size, bedrooms]])
label = "Expensive" if prediction[0] == 1 else "Not Expensive"
print(f"The predicted class for the house is: {label}")
OUTPUT
Accuracy: 1.0

Enter house size in sqft: 1600
Enter number of bedrooms: 3
The predicted class for the house is: Expensive
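
Logistic regression also provides class probabilities, not just labels. A sketch (our addition) reusing the trained model; building a one-row DataFrame with the training column names avoids scikit-learn's feature-name warning:

sample = pd.DataFrame([[1600, 3]], columns=['HouseSize', 'Bedrooms'])
proba = model.predict_proba(sample)[0]
print(f"P(Not Expensive) = {proba[0]:.3f}, P(Expensive) = {proba[1]:.3f}")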
PROGRAM CODE
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.mixture import GaussianMixture
from sklearn.metrics import accuracy_score

# Step 1: Create synthetic classification data
X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=1, random_state=42)
df = pd.DataFrame(X, columns=['Feature1', 'Feature2'])
df['Label'] = y

# Step 2: Split data for Decision Tree
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Step 3: Train Decision Tree
tree_model = DecisionTreeClassifier(max_depth=3, random_state=42)
tree_model.fit(X_train, y_train)

# Step 4: Predict and evaluate
y_pred = tree_model.predict(X_test)
print("Decision Tree Accuracy:", accuracy_score(y_test, y_pred))

# Step 5: Visualize the Decision Tree
plt.figure(figsize=(12, 6))
plot_tree(tree_model, filled=True, feature_names=['Feature1', 'Feature2'])
plt.title("Decision Tree")
plt.show()

# Step 6: Apply Gaussian Mixture Model
gmm = GaussianMixture(n_components=2, random_state=42)
gmm_labels = gmm.fit_predict(X)

# Step 7: Visualize GMM Clusters
plt.figure(figsize=(8, 6))
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=gmm_labels, palette="coolwarm", s=70)
plt.title("Gaussian Mixture Model Clusters")
plt.xlabel("Feature1")
plt.ylabel("Feature2")
plt.grid(True)
plt.show()

OUTPUT
Decision Tree Accuracy: 0.9333333333333333
Text form of the fitted tree (see the export_text sketch below):
|--- Feature1 <= 0.75
| |--- Feature2 <= -0.33
| | |--- class: 0
| |--- Feature2 > -0.33
| | |--- class: 1
|--- Feature1 > 0.75
| |--- Feature2 <= 0.15
| | |--- class: 1
| |--- Feature2 > 0.15
| | |--- class: 0
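
The text form of the tree shown above can be generated with scikit-learn's export_text; the program itself only draws the tree with plot_tree. A one-line sketch:

from sklearn.tree import export_text

print(export_text(tree_model, feature_names=['Feature1', 'Feature2']))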
PROGRAM CODE
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import plot_tree

# Step 1: Load Iris dataset
iris = load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names
target_names = iris.target_names

# Step 2: Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Step 3: Create and train the Random Forest model
rf_model = RandomForestClassifier(n_estimators=5, random_state=42)
rf_model.fit(X_train, y_train)

# Step 4: Predict and evaluate
y_pred = rf_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Random Forest Accuracy:", accuracy)

# Step 5: Feature importance
importances = rf_model.feature_importances_
print("\nFeature Importances:")
for name, importance in zip(feature_names, importances):
    print(f"{name}: {importance:.4f}")

# Step 6: Visualize a few trees in the forest
plt.figure(figsize=(15, 10))
for i in range(3):  # Display first 3 trees
    plt.subplot(1, 3, i + 1)
    plot_tree(rf_model.estimators_[i],
              feature_names=feature_names,
              class_names=target_names,
              filled=True,
              rounded=True,
              fontsize=6)
    plt.title(f"Tree {i+1}")
plt.tight_layout()
plt.show()

OUTPUT

Random Forest Accuracy: 1.0

Feature Importances:
sepal length (cm): 0.0874
sepal width (cm): 0.0141
petal length (cm): 0.4352
petal width (cm): 0.4633
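
A sketch (our addition) classifying a single new measurement with the trained forest; the four values follow the feature order sepal length, sepal width, petal length, petal width:

sample = [[5.1, 3.5, 1.4, 0.2]]
pred = rf_model.predict(sample)[0]
print("Predicted species:", target_names[pred])  # e.g. setosa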
