AI Algorithms for Sentiment and Pathfinding

The document contains three sections demonstrating different AI algorithms: Naive Bayes for sentiment analysis, a Decision Tree for classification using data from an Excel file, and the A* algorithm for pathfinding in a graph. Each section includes a code snippet for training a model or running the search, making predictions, and, where applicable, visualizing the results. The examples illustrate practical applications of these algorithms in Python using libraries such as scikit-learn and pandas.


1. Naive Bayes

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
# Training data (label 1 = positive sentiment, 0 = negative sentiment)
train_sentences = [
    "I love this movie",
    "This movie is great",
    "Wonderful movie",
    "I hate this movie",
    "This movie is terrible"
]
train_labels = [1, 1, 1, 0, 0]
# Vectorize the training data
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(train_sentences)
# Train the classifier
nb_classifier = MultinomialNB()
nb_classifier.fit(X_train, train_labels)
# Get user input
user_input = input("Enter a sentence: ")
# Vectorize the user input
X_user = vectorizer.transform([user_input])
# Predict sentiment
prediction = nb_classifier.predict(X_user)[0]
# Print prediction
sentiment = "positive" if prediction == 1 else "negative"
print(f"Predicted sentiment for '{user_input}': {sentiment}")

2. Decision Tree
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
from sklearn import tree

# Load data from Excel file (assumes the file is named "nation.xlsx")
df = pd.read_excel("nation.xlsx")

# Map categorical variables to numerical values
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
# Features and target variable
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']

# Decision tree classifier using entropy (information gain) as the split criterion
dtree = DecisionTreeClassifier(criterion="entropy")
dtree = dtree.fit(X, y)

# Plot the fitted decision tree
plt.figure(figsize=(12, 8))
tree.plot_tree(dtree, feature_names=features, class_names=['NO', 'YES'], filled=True)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.show()
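
After fitting, the tree can also score an individual record. A minimal sketch, assuming a hypothetical candidate with Age=40, Experience=10, Rank=7, and Nationality='USA' (mapped to 1 above):

# Hypothetical new record: Age=40, Experience=10, Rank=7, Nationality='USA' (mapped to 1)
sample = pd.DataFrame([[40, 10, 7, 1]], columns=features)
print("Predicted Go:", "YES" if dtree.predict(sample)[0] == 1 else "NO")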

3. A*
import heapq

def ma_star(graph, start, dest, heuristic, memory_limit):
    # Memory-bounded A*: the priority queue never holds more than memory_limit entries.
    distances = {vertex: float('inf') for vertex in graph}  # best recorded value per vertex (f = g + h once updated)
    distances[start] = 0
    parent = {vertex: None for vertex in graph}
    visited = set()
    pq = [(0 + heuristic[start], 0, start)]  # queue entries are (f, g, vertex)
    num_nodes = 1  # the start node is already on the queue
    while pq:
        curr_f, curr_dist, curr_vert = heapq.heappop(pq)
        num_nodes -= 1
        if curr_vert == dest:
            break
        if curr_vert not in visited:
            visited.add(curr_vert)
            for nbor, weight in graph[curr_vert].items():
                distance = curr_dist + weight            # g(nbor) via curr_vert
                f_distance = distance + heuristic[nbor]  # f = g + h
                if f_distance < distances[nbor]:
                    distances[nbor] = f_distance
                    parent[nbor] = curr_vert
                    if num_nodes < memory_limit:
                        heapq.heappush(pq, (f_distance, distance, nbor))
                        num_nodes += 1
                    elif f_distance < max(pq)[0]:
                        # Queue is full: drop the entry with the largest f-value,
                        # restore the heap invariant, then push the better entry.
                        pq.remove(max(pq))
                        heapq.heapify(pq)
                        heapq.heappush(pq, (f_distance, distance, nbor))

    return distances, parent

def generate_path_from_parents(parent, start, dest):
    # Walk the parent pointers back from the destination, then reverse the path.
    path = []
    curr = dest
    while curr:
        path.append(curr)
        curr = parent[curr]
    return '->'.join(path[::-1])

graph = {
    'S': {'A': 2, 'B': 1},
    'A': {'B': 3, 'C': 1},
    'B': {'D': 4},
    'C': {'D': 2},
    'D': {}
}

heuristic = {
    'S': 5,
    'A': 4,
    'B': 3,
    'C': 2,
    'D': 0
}

start = 'S'
dest = 'D'
memory_limit = 2
distances, parent = ma_star(graph, start, dest, heuristic, memory_limit)
print('optimal path => ', generate_path_from_parents(parent, start, dest))
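
As a quick sanity check, the same search can be rerun with a larger memory limit; on this small graph the memory bound above should never be hit, so the reported path should not change. A minimal sketch, assuming the graph, heuristic, and ma_star defined above:

# Rerun with a larger (hypothetical) memory limit; on this small graph the result should match
distances_big, parent_big = ma_star(graph, start, dest, heuristic, memory_limit=10)
print('path with larger memory limit => ', generate_path_from_parents(parent_big, start, dest))
# Because heuristic['D'] is 0, the value stored for the destination equals the path cost g(D)
print('path cost => ', distances_big[dest])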
