Python Programs
Program – 1

Logistic Regression

Program Input :
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Example dataset
data = pd.DataFrame({
    'Hours_Studied': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    'Passed': [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
})

print(data)
X = data[['Hours_Studied']]
y = data['Passed']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_proba = model.predict_proba(X_test) # probabilities
print("Accuracy:", accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# For visualization
X_range = np.linspace(0, 10, 100).reshape(-1, 1)
y_probs = model.predict_proba(X_range)[:,1]

plt.figure(figsize=(8, 5))
plt.plot(X_range, y_probs, color='blue')
plt.xlabel("Hours Studied")
plt.ylabel("Probability of Passing")
plt.title("Sigmoid Curve")
plt.grid(True)
plt.show()

Output :
   Hours_Studied  Passed
0              1       0
1              2       0
2              3       0
3              4       0
4              5       0
5              6       1
6              7       1
7              8       1
8              9       1
9             10       1
Accuracy: 0.6666666666666666
[[1 0]
 [1 1]]
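Note: with a 70/30 split of ten samples, the test set holds only three examples; the confusion matrix [[1 0], [1 1]] shows two correct predictions out of three, which is where the accuracy of 2/3 ≈ 0.667 comes from. The plotted curve is the logistic (sigmoid) function the model fits, P(pass | x) = 1 / (1 + e^-(β0 + β1·x)), mapping hours studied to a probability between 0 and 1.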
Program – 2
Perceptron Algorithm :
import numpy as np

# Step function for activation
def step_function(x):
    return 1 if x >= 0 else 0

# Perceptron training function
def perceptron_train(X, y, learning_rate=0.1, epochs=10):
    weights = np.zeros(X.shape[1])
    bias = 0

    for epoch in range(epochs):
        print(f"Epoch {epoch+1}")
        for i in range(len(X)):
            linear_output = np.dot(X[i], weights) + bias
            y_pred = step_function(linear_output)
            error = y[i] - y_pred

            # Update weights and bias
            weights += learning_rate * error * X[i]
            bias += learning_rate * error

            print(f" Sample {i+1}: Error={error}, Weights={weights}, Bias={bias}")

    return weights, bias

# Prediction function
def perceptron_predict(X, weights, bias):
    predictions = []
    for i in X:
        linear_output = np.dot(i, weights) + bias
        predictions.append(step_function(linear_output))
    return predictions

# Example dataset: OR logic gate
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])
y = np.array([0, 1, 1, 1])  # OR gate output

# Train perceptron
weights, bias = perceptron_train(X, y, learning_rate=0.1, epochs=5)

# Test perceptron
predictions = perceptron_predict(X, weights, bias)
print("\nFinal Predictions:", predictions)
print("Final Weights:", weights)
print("Final Bias:", bias)

Output :
Epoch 1
 Sample 1: Error=-1, Weights=[0. 0.], Bias=-0.1
 Sample 2: Error=1, Weights=[0. 0.1], Bias=0.0
 Sample 3: Error=0, Weights=[0. 0.1], Bias=0.0
 Sample 4: Error=0, Weights=[0. 0.1], Bias=0.0
Epoch 2
 Sample 1: Error=-1, Weights=[0. 0.1], Bias=-0.1
 Sample 2: Error=0, Weights=[0. 0.1], Bias=-0.1
 Sample 3: Error=1, Weights=[0.1 0.1], Bias=0.0
 Sample 4: Error=0, Weights=[0.1 0.1], Bias=0.0
Epoch 3
 Sample 1: Error=-1, Weights=[0.1 0.1], Bias=-0.1
 Sample 2: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 3: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 4: Error=0, Weights=[0.1 0.1], Bias=-0.1
Epoch 4
 Sample 1: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 2: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 3: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 4: Error=0, Weights=[0.1 0.1], Bias=-0.1
Epoch 5
 Sample 1: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 2: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 3: Error=0, Weights=[0.1 0.1], Bias=-0.1
 Sample 4: Error=0, Weights=[0.1 0.1], Bias=-0.1

Final Predictions: [0, 1, 1, 1]
Final Weights: [0.1 0.1]
Final Bias: -0.1
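A quick check of the learned parameters: with weights [0.1 0.1] and bias -0.1, the perceptron fires when 0.1·x1 + 0.1·x2 - 0.1 ≥ 0. For [0, 0] the sum is -0.1 (output 0), while any input containing a 1 reaches at least 0.1 - 0.1 = 0 (output 1), which is exactly the OR truth table.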


Program – 3
Bayesian Model :
# Bayesian Model: Naive Bayes Classification

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# Step 1: Load dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Step 2: Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Step 3: Create and train Gaussian Naive Bayes model
model = GaussianNB()
model.fit(X_train, y_train)

# Step 4: Predictions
y_pred = model.predict(X_test)

# Step 5: Evaluation
print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))

# Step 6: Show class probabilities for first 5 samples
print("\nPredicted Probabilities (first 5 samples):\n", model.predict_proba(X_test[:5]))

Output:
Accuracy: 0.9777777777777777

Confusion Matrix:
 [[19  0  0]
 [ 0 12  1]
 [ 0  0 13]]

Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00        19
           1       1.00      0.92      0.96        13
           2       0.93      1.00      0.96        13

    accuracy                           0.98        45
   macro avg       0.98      0.97      0.97        45
weighted avg       0.98      0.98      0.98        45

Predicted Probabilities (first 5 samples):
 [[4.15880005e-088 9.95527834e-001 4.47216606e-003]
 [1.00000000e+000 1.31031235e-013 2.21772205e-020]
 [9.83170191e-285 2.70138564e-012 1.00000000e+000]
 [9.54745274e-092 9.74861431e-001 2.51385686e-002]
 [1.08679560e-103 8.31910700e-001 1.68089300e-001]]
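Each row of the predicted-probability matrix sums to 1, and the predicted class is the column with the largest value; for example, the first test sample is assigned class 1 with probability ≈ 0.9955 and the third is assigned class 2 with probability ≈ 1.0.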
Program – 4
Numerical Optimization:
# Numerical Optimization: Gradient Descent Example

def f(x):
    return x**2 + 4*x + 4  # Function to minimize

def f_prime(x):
    return 2*x + 4  # Derivative of f(x)

# Parameters
x = 5.0  # Starting guess
learning_rate = 0.1
tolerance = 1e-6
max_iterations = 100

print("Starting Gradient Descent:")

for i in range(max_iterations):
    grad = f_prime(x)  # Compute gradient
    new_x = x - learning_rate * grad
    print(f"Iter {i+1}: x = {new_x:.6f}, f(x) = {f(new_x):.6f}")

    if abs(new_x - x) < tolerance:  # Stop if change is tiny
        break
    x = new_x

print(f"\nMinimum found at x = {x:.6f}, f(x) = {f(x):.6f}")


Output:
Starting Gradient Descent:
Iter 1: x = 3.600000, f(x) = 31.360000
Iter 2: x = 2.480000, f(x) = 20.070400
Iter 3: x = 1.584000, f(x) = 12.845056
Iter 4: x = 0.867200, f(x) = 8.220836
Iter 5: x = 0.293760, f(x) = 5.261335
Iter 6: x = -0.164992, f(x) = 3.367254
Iter 7: x = -0.531994, f(x) = 2.155043
Iter 8: x = -0.825595, f(x) = 1.379227
Iter 9: x = -1.060476, f(x) = 0.882706
Iter 10: x = -1.248381, f(x) = 0.564932
Iter 11: x = -1.398705, f(x) = 0.361556
Iter 12: x = -1.518964, f(x) = 0.231396
Iter 13: x = -1.615171, f(x) = 0.148093
Iter 14: x = -1.692137, f(x) = 0.094780
Iter 15: x = -1.753709, f(x) = 0.060659
Iter 16: x = -1.802968, f(x) = 0.038822
Iter 17: x = -1.842374, f(x) = 0.024846
Iter 18: x = -1.873899, f(x) = 0.015901
Iter 19: x = -1.899119, f(x) = 0.010177
Iter 20: x = -1.919295, f(x) = 0.006513
Iter 21: x = -1.935436, f(x) = 0.004168
Iter 22: x = -1.948349, f(x) = 0.002668
Iter 24: x = -1.966943, f(x) = 0.001093
Iter 25: x = -1.973555, f(x) = 0.000699
Iter 26: x = -1.978844, f(x) = 0.000448
Iter 27: x = -1.983075, f(x) = 0.000286
Iter 28: x = -1.986460, f(x) = 0.000183
Iter 29: x = -1.989168, f(x) = 0.000117
Iter 30: x = -1.991334, f(x) = 0.000075
Iter 31: x = -1.993068, f(x) = 0.000048
Iter 32: x = -1.994454, f(x) = 0.000031
Iter 33: x = -1.995563, f(x) = 0.000020
Iter 34: x = -1.996451, f(x) = 0.000013
Iter 35: x = -1.997160, f(x) = 0.000008
Iter 36: x = -1.997728, f(x) = 0.000005
Iter 37: x = -1.998183, f(x) = 0.000003
Iter 38: x = -1.998546, f(x) = 0.000002
Iter 39: x = -1.998837, f(x) = 0.000001
Iter 40: x = -1.999070, f(x) = 0.000001
Iter 41: x = -1.999256, f(x) = 0.000001
Iter 42: x = -1.999405, f(x) = 0.000000
Iter 43: x = -1.999524, f(x) = 0.000000
Iter 44: x = -1.999619, f(x) = 0.000000
Iter 45: x = -1.999695, f(x) = 0.000000
Iter 46: x = -1.999756, f(x) = 0.000000
Iter 47: x = -1.999805, f(x) = 0.000000
Iter 48: x = -1.999844, f(x) = 0.000000
Iter 49: x = -1.999875, f(x) = 0.000000
Iter 50: x = -1.999900, f(x) = 0.000000
Iter 51: x = -1.999920, f(x) = 0.000000
Iter 52: x = -1.999936, f(x) = 0.000000
Iter 53: x = -1.999949, f(x) = 0.000000
Iter 54: x = -1.999959, f(x) = 0.000000
Iter 55: x = -1.999967, f(x) = 0.000000
Iter 56: x = -1.999974, f(x) = 0.000000
Iter 57: x = -1.999979, f(x) = 0.000000
Iter 58: x = -1.999983, f(x) = 0.000000
Iter 59: x = -1.999987, f(x) = 0.000000
Iter 60: x = -1.999989, f(x) = 0.000000
Iter 61: x = -1.999991, f(x) = 0.000000
Iter 62: x = -1.999993, f(x) = 0.000000
Iter 63: x = -1.999995, f(x) = 0.000000
Iter 64: x = -1.999996, f(x) = 0.000000
Iter 65: x = -1.999996, f(x) = 0.000000
Minimum found at x = -1.999996, f(x) = 0.000000
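A quick analytic check: f(x) = x² + 4x + 4 = (x + 2)², so setting f'(x) = 2x + 4 = 0 gives the exact minimum at x = -2 with f(-2) = 0. Each update multiplies the distance to this minimum by (1 - 2·learning_rate) = 0.8, so the iterates shrink geometrically toward -2, matching the final estimate of -1.999996.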
Program – 5
Classification Rates:
# Program to calculate classification rates

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix

# Example: True and predicted labels
y_true = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0]  # Actual values
y_pred = [1, 0, 1, 0, 0, 1, 1, 0, 1, 0]  # Predicted values

# Confusion Matrix
cm = confusion_matrix(y_true, y_pred)
print("Confusion Matrix:\n", cm)

# Classification Rates
accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)

# Display results
print("\nClassification Rates:")
print(f"Accuracy: {accuracy:.2f}")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 Score: {f1:.2f}")
Output :
Confusion Matrix:
 [[4 1]
 [1 4]]
Classification Rates:
Accuracy: 0.80
Precision: 0.80
Recall: 0.80
F1 Score: 0.80
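As a cross-check (a short sketch continuing the program above, not part of the original listing), the same rates can be recomputed by hand from the confusion matrix:

# Unpack the 2x2 confusion matrix and recompute the rates manually
tn, fp, fn, tp = cm.ravel()                # [[4 1] [1 4]] -> tn=4, fp=1, fn=1, tp=4
print("Precision:", tp / (tp + fp))        # 4 / 5 = 0.80
print("Recall:", tp / (tp + fn))           # 4 / 5 = 0.80
print("F1:", 2 * tp / (2 * tp + fp + fn))  # 8 / 10 = 0.80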
Program – 6
Regression Analysis :
# Regression Analysis using Linear Regression

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Step 1: Sample dataset
data = {
    'Hours_Studied': [2, 3, 4, 5, 6, 7, 8, 9],
    'Exam_Score': [50, 55, 60, 65, 70, 76, 82, 88]
}
df = pd.DataFrame(data)

# Step 2: Split into features (X) and target (y)
X = df[['Hours_Studied']]
y = df['Exam_Score']

# Step 3: Create and train model
model = LinearRegression()
model.fit(X, y)

# Step 4: Predictions
y_pred = model.predict(X)

# Step 5: Evaluation
print("Intercept (β0):", model.intercept_)
print("Coefficient (β1):", model.coef_[0])
print("Mean Squared Error (MSE):", mean_squared_error(y, y_pred))
print("R² Score:", r2_score(y, y_pred))

# Step 6: Visualization
plt.scatter(X, y, color='blue', label='Actual Data')
plt.plot(X, y_pred, color='red', linewidth=2, label='Regression Line')
plt.xlabel("Hours Studied")
plt.ylabel("Exam Score")
plt.title("Regression Analysis: Hours Studied vs Exam Score")
plt.legend()
plt.show()

Output :
Intercept (β0): 38.523809523809504
Coefficient (β1): 5.404761904761908
Mean Squared Error (MSE): 0.3273809523809522
R² Score: 0.9978698270686884
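The fitted line is Exam_Score ≈ 38.52 + 5.40 × Hours_Studied. As a small follow-up sketch (hypothetical, continuing the program above), it can be applied to a new value such as 10 hours of study:

# Hypothetical extension: predict the exam score for 10 hours of study
new_hours = pd.DataFrame({'Hours_Studied': [10]})
print(model.predict(new_hours))  # ≈ 92.57, i.e. 38.52 + 5.40 * 10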
Program – 7
L – Nearest Neighbour :
import numpy as np

def l_distance(x1, x2, L=2):
    """Compute L-p distance between two points."""
    return np.sum(np.abs(x1 - x2) ** L) ** (1 / L)

def l_nearest_neighbour(train_X, train_y, test_point, L=2):
    """Find the label of the nearest neighbour using the L-p norm."""
    distances = [l_distance(test_point, x, L) for x in train_X]
    nearest_index = np.argmin(distances)
    return train_y[nearest_index]

# Example dataset (features + labels)
X_train = np.array([
    [1, 2],
    [2, 3],
    [3, 3],
    [6, 5]
])
y_train = np.array(["A", "A", "B", "B"])

# Test points
X_test = np.array([
    [2, 2],
    [5, 5]
])

# Classify test points
L_value = 1  # Try 1 (Manhattan), 2 (Euclidean), etc.
for point in X_test:
    label = l_nearest_neighbour(X_train, y_train, point, L=L_value)
    print(f"Test point {point} → Predicted class: {label}")

Output :
Test point [2 2] → Predicted class: A
Test point [5 5] → Predicted class: B
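Worked check with L_value = 1 (Manhattan distance): for the test point [2, 2] the distances to the four training points are 1, 1, 2 and 7, so the nearest neighbour is [1, 2] with class A (np.argmin returns the first of the two tied points). For [5, 5] the distances are 7, 5, 4 and 1, so [6, 5] with class B is chosen.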
Program – 8
Back Propagation :
import numpy as np

# Sigmoid activation function & derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

# Input dataset (XOR problem)
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# Output labels
y = np.array([[0], [1], [1], [0]])

# Seed for reproducibility
np.random.seed(42)

# Network architecture
input_neurons = 2
hidden_neurons = 2
output_neurons = 1

# Random weight initialization
W1 = np.random.uniform(-1, 1, (input_neurons, hidden_neurons))
W2 = np.random.uniform(-1, 1, (hidden_neurons, output_neurons))
b1 = np.random.uniform(-1, 1, (1, hidden_neurons))
b2 = np.random.uniform(-1, 1, (1, output_neurons))

# Training parameters
learning_rate = 0.5
epochs = 10000

# Training loop
for epoch in range(epochs):
    # Forward pass
    hidden_input = np.dot(X, W1) + b1
    hidden_output = sigmoid(hidden_input)

    final_input = np.dot(hidden_output, W2) + b2
    final_output = sigmoid(final_input)

    # Error calculation
    error = y - final_output

    # Backpropagation
    d_final = error * sigmoid_derivative(final_output)
    error_hidden = d_final.dot(W2.T)
    d_hidden = error_hidden * sigmoid_derivative(hidden_output)

    # Update weights and biases
    W2 += hidden_output.T.dot(d_final) * learning_rate
    b2 += np.sum(d_final, axis=0, keepdims=True) * learning_rate
    W1 += X.T.dot(d_hidden) * learning_rate
    b1 += np.sum(d_hidden, axis=0, keepdims=True) * learning_rate

    # Print error every 2000 epochs
    if epoch % 2000 == 0:
        print(f"Epoch {epoch}, Error: {np.mean(np.abs(error)):.6f}")

# Final results
print("\nFinal Output after Training:")
print(final_output)

Output :
Epoch 0, Error: 0.501119
Epoch 2000, Error: 0.082881
Epoch 4000, Error: 0.043086
Epoch 6000, Error: 0.032340
Epoch 8000, Error: 0.026887
Final Output after Training:
[[0.02254657]
 [0.9747476 ]
 [0.97447942]
 [0.02054648]]
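Rounded to the nearest integer, the final outputs are [0, 1, 1, 0], which is the XOR truth table. If hard labels are wanted, a threshold can be applied, e.g. (final_output > 0.5).astype(int) (an optional addition, not in the original listing).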
Program – 9
SVM for Regression Analysis :

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# Step 1: Load dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Step 2: Split into training & testing sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Step 3: Create SVM model
# kernel can be 'linear', 'poly', 'rbf', 'sigmoid'
model = SVC(kernel='linear', C=1.0)

# Step 4: Train the model
model.fit(X_train, y_train)

# Step 5: Predictions
y_pred = model.predict(X_test)

# Step 6: Evaluation
print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))

Output :
Accuracy: 1.0

Confusion Matrix:
 [[19  0  0]
 [ 0 13  0]
 [ 0  0 13]]

Classification Report:
               precision    recall  f1-score   support

           0       1.00      1.00      1.00        19
           1       1.00      1.00      1.00        13
           2       1.00      1.00      1.00        13

    accuracy                           1.00        45
   macro avg       1.00      1.00      1.00        45
weighted avg       1.00      1.00      1.00        45
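Note that the listing above performs classification with SVC on the iris dataset; for the regression task the heading refers to, scikit-learn's SVR offers the same fit/predict workflow. A minimal sketch on a synthetic dataset (illustrative, not part of the original program):

from sklearn.svm import SVR
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score

# Synthetic regression data (assumed for illustration)
X, y = make_regression(n_samples=200, n_features=3, noise=0.2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

reg = SVR(kernel='rbf', C=1.0)  # support vector regression
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)

print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
print("R² Score:", r2_score(y_test, y_pred))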
Program – 10
Random forests and Analysis :
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

# Step 1: Create synthetic dataset
X, y = make_regression(
    n_samples=200,    # number of samples
    n_features=3,     # number of features
    noise=0.2,        # add some noise
    random_state=42
)

# Step 2: Split dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Step 3: Create Random Forest Regressor
model = RandomForestRegressor(
    n_estimators=100,  # number of trees
    random_state=42
)

# Step 4: Train the model
model.fit(X_train, y_train)

# Step 5: Predictions
y_pred = model.predict(X_test)

# Step 6: Evaluation metrics
print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
print("R² Score:", r2_score(y_test, y_pred))

# Step 7: Feature importance analysis
feature_importances = model.feature_importances_
for i, score in enumerate(feature_importances):
    print(f"Feature {i+1} importance: {score:.4f}")

# Step 8: Plot actual vs predicted
plt.scatter(y_test, y_pred, color='blue')
plt.xlabel("Actual Values")
plt.ylabel("Predicted Values")
plt.title("Random Forest Regression - Actual vs Predicted")
plt.plot([min(y_test), max(y_test)], [min(y_test), max(y_test)], 'r--')
plt.show()

# Step 9: Plot feature importances
plt.bar(range(len(feature_importances)), feature_importances, color='green')
plt.xticks(range(len(feature_importances)),
           [f"Feature {i+1}" for i in range(len(feature_importances))])
plt.ylabel("Importance Score")
plt.title("Feature Importance from Random Forest")
plt.show()
Output :
Mean Squared Error: 666.9975539363563
R² Score: 0.9318828523810782
Feature 1 importance: 0.4343
Feature 2 importance: 0.0370
Feature 3 importance: 0.5286
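The three importance scores sum to 1 (0.4343 + 0.0370 + 0.5286 ≈ 1.0); the forest attributes roughly 96% of its predictive signal to features 1 and 3 and almost none to feature 2.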
