22MCA1008 - Varun ML LAB ASSIGNMENTS
# Load the Iris dataset from a local CSV and echo it for inspection.
import pandas as pd
# NOTE(review): hardcoded absolute Windows path — assumes this exact machine;
# consider a configurable DATA_DIR so the notebook runs elsewhere.
dataFrame = pd.read_csv(r"C:\Users\varun\Downloads\Iris.csv")
# Prints the entire frame (150 rows for Iris); .head() would be lighter for larger data.
print("\nReading the CSV file...\n",dataFrame)
import itertools
import statsmodels.api as sm
# Summary statistics of the feature frame (presumably the Iris numeric columns
# loaded earlier — confirm against the cell that defines length_width).
length_width.describe()
# Simple linear regression: sepal length as a function of sepal width.
y = length_width['sepal.length']
x1= length_width['sepal.width']
plt.scatter(x1,y)
# Degree-1 polynomial least-squares fit -> array [slope, intercept].
model = np.polyfit(x1, y, 1)
# Design matrix with an intercept column: rows are [1, x1_i].
X_mat=np.vstack((np.ones(len(x1)), x1)).T
# Closed-form OLS estimate: beta_hat = (X^T X)^{-1} X^T y.
beta_hat = np.linalg.inv(X_mat.T.dot(X_mat)).dot(X_mat.T).dot(y)
print(beta_hat)
# Fitted values; overlay the regression line on the scatter plot.
yhat = X_mat.dot(beta_hat)
plt.scatter(x1, y)
plt.plot(x1, yhat, color='red')
1
Reading the CSV file…
sepal.length sepal.width petal.length petal.width variety
0 5.1 3.5 1.4 0.2 Setosa
1 4.9 3.0 1.4 0.2 Setosa
2 4.7 3.2 1.3 0.2 Setosa
3 4.6 3.1 1.5 0.2 Setosa
4 5.0 3.6 1.4 0.2 Setosa
.. … … … … …
145 6.7 3.0 5.2 2.3 Virginica
146 6.3 2.5 5.0 1.9 Virginica
147 6.5 3.0 5.2 2.0 Virginica
148 6.2 3.4 5.4 2.3 Virginica
149 5.9 3.0 5.1 1.8 Virginica
2
[ ]:
3
SVM
1
plt.show()
random_state = 5)
classifier = SVC(kernel = 'poly',gamma = 'auto' ,degree = 3, C = 10)
classifier.fit(X_train, y_train)
2
[ ]:
3
ID3-Decision Tree
[8]: "D:\MCA\PlayTennis.csv"
total_class_entr = - (total_class_count/total_row)*np.
↪log2(total_class_count/total_row) #entropy of the class
return total_entr
for c in class_list:
1
label_class_count = feature_value_data[feature_value_data[label] == c].
↪shape[0] #row count of class c
entropy_class = 0
if label_class_count != 0:
probability_class = label_class_count/class_count #probability of␣
↪the class
entropy += entropy_class
return entropy
total_row = train_data.shape[0]
feature_info = 0.0
feature_value_count = feature_value_data.shape[0]
feature_value_entropy = calc_entropy(feature_value_data, label,␣
↪class_list) #calculcating entropy for the feature value
feature_value_probability = feature_value_count/total_row
feature_info += feature_value_probability * feature_value_entropy␣
↪#calculating information of the feature value
max_info_gain = -1
max_info_feature = None
max_info_gain = feature_info_gain
max_info_feature = feature
2
return max_info_feature
assigned_to_node = True
if not assigned_to_node: #not pure class
tree[feature_value] = "?" #as feature_value is not a pure class, it␣
↪should be expanded further,
next_root = None
3
for node, branch in list(next_root.items()): #iterating the tree node
if branch == "?": #if it is expandable
feature_value_data = train_data[train_data[max_info_feature] ==␣
↪node] #using the updated dataset
return tree
C:\Users\varun\AppData\Local\Temp\ipykernel_42712\608907770.py:5: FutureWarning:
iteritems is deprecated and will be removed in a future version. Use .items
instead.
for feature_value, count in feature_value_count_dict.iteritems():
C:\Users\varun\AppData\Local\Temp\ipykernel_42712\608907770.py:5: FutureWarning:
iteritems is deprecated and will be removed in a future version. Use .items
instead.
for feature_value, count in feature_value_count_dict.iteritems():
C:\Users\varun\AppData\Local\Temp\ipykernel_42712\608907770.py:5: FutureWarning:
iteritems is deprecated and will be removed in a future version. Use .items
instead.
for feature_value, count in feature_value_count_dict.iteritems():
[17]: print(tree)
else:
return None
4
[19]: def evaluate(tree, test_data_m, label):
correct_preditct = 0
wrong_preditct = 0
for index, row in test_data_m.iterrows(): #for each row in the dataset
result = predict(tree, test_data_m.iloc[index]) #predict the row
if result == test_data_m[label].iloc[index]: #predicted value and␣
↪expected value is same or not
return accuracy
[22]: print(accuracy)
1.0
5
CNN
1
[4]: # load dataset
(trainX, trainY), (testX, testY) = mnist.load_data()
# reshape dataset to have a single channel
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
testX = testX.reshape((testX.shape[0], 28, 28, 1))
2
# evaluate a model using k-fold cross-validation
def evaluate_model(dataX, dataY, n_folds=5):
    """Train and evaluate the CNN with k-fold cross-validation.

    Args:
        dataX: image array, integer-indexable by fold indices.
        dataY: label array aligned with dataX.
        n_folds: number of cross-validation folds (default 5).

    Returns:
        (scores, histories): per-fold test accuracies and the Keras
        History object from each fold's fit.
    """
    scores, histories = list(), list()
    # prepare cross validation (fixed seed so splits are reproducible)
    kfold = KFold(n_folds, shuffle=True, random_state=1)
    # enumerate splits
    for train_ix, test_ix in kfold.split(dataX):
        # define a fresh model per fold so folds do not share weights
        model = define_model()
        # select rows for train and test
        trainX, trainY = dataX[train_ix], dataY[train_ix]
        testX, testY = dataX[test_ix], dataY[test_ix]
        # fit model
        history = model.fit(trainX, trainY, epochs=10, batch_size=32,
                            validation_data=(testX, testY), verbose=0)
        # evaluate model on the held-out fold
        _, acc = model.evaluate(testX, testY, verbose=0)
        print('> %.3f' % (acc * 100.0))
        # store scores
        scores.append(acc)
        histories.append(history)
    return scores, histories
plt.show()
3
# run the test harness for evaluating a model
def run_test_harness():
    """End-to-end driver: load MNIST, normalize pixels, cross-validate,
    then plot learning curves and summarize fold accuracies."""
    # load dataset
    trainX, trainY, testX, testY = load_dataset()
    # prepare pixel data
    trainX, testX = prep_pixels(trainX, testX)
    # evaluate model
    scores, histories = evaluate_model(trainX, trainY)
    # learning curves
    summarize_diagnostics(histories)
    # summarize estimated performance
    summarize_performance(scores)
> 98.483
4
[26]: # save the final model to file
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu',␣
↪kernel_initializer='he_uniform'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))
5
# compile model
opt = SGD(learning_rate=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy',␣
↪metrics=['accuracy'])
return model
# scale pixels
def prep_pixels(train, test):
    """Convert integer pixel arrays to float32 scaled into [0, 1].

    Args:
        train, test: numeric arrays (e.g. uint8 images with values 0-255).

    Returns:
        (train_norm, test_norm): float32 copies divided by 255.0.
    """
    # convert from integers to floats
    train_norm = train.astype('float32')
    test_norm = test.astype('float32')
    # normalize to range 0-1
    train_norm = train_norm / 255.0
    test_norm = test_norm / 255.0
    # return normalized images
    return train_norm, test_norm
6
run_test_harness()
> 98.990
C:\Users\eg1\AppData\Roaming\Python\Python39\site-
packages\keras\utils\image_utils.py:409: UserWarning: grayscale is deprecated.
Please use color_mode = "grayscale"
warnings.warn(
1/1 [==============================] - 0s 64ms/step
7
[ ]:
7
Backpropagation
Y = iris.Species
Y = one_hot_encoder.fit_transform(np.array(Y).reshape(-1, 1))
Y[:5]
hidden_layers = len(nodes) - 1
weights = InitializeWeights(nodes)
1
for epoch in range(1, epochs+1):
weights = Train(X_train, Y_train, lr, weights)
if(epoch % 20 == 0):
print("Epoch {}".format(epoch))
print("Training Accuracy:{}".format(Accuracy(X_train, Y_train,␣
↪weights)))
if X_val.any():
print("Validation Accuracy:{}".format(Accuracy(X_val, Y_val,␣
↪weights)))
return weights
return weights
return activations
2
delta = np.multiply(error, SigmoidDerivative(currActivation))
weights[j-1] += lr * np.multiply(delta.T, prevActivation)
return weights
return weights
def SigmoidDerivative(x):
    """Derivative of the sigmoid expressed via its output: s'(z) = x * (1 - x),
    where x is the already-computed sigmoid activation."""
    return np.multiply(x, 1-x)
##_Forward Propagation_##
activations = ForwardPropagation(item, weights, layers)
outputFinal = activations[-1].A1
index = FindMaxActivation(outputFinal)
def FindMaxActivation(output):
    """Find max activation in output.

    Returns the index of the largest element (argmax); ties resolve to
    the earliest index. Assumes `output` is non-empty.
    """
    m, index = output[0], 0
    for i in range(1, len(output)):
        if(output[i] > m):
            m, index = output[i], i
    return index
for i in range(len(X)):
x, y = X[i], list(Y[i])
guess = Predict(x, weights)
if(y == guess):
# Guessed correctly
correct += 1
Epoch 20
Training Accuracy:0.9736842105263158
Validation Accuracy:0.9230769230769231
Epoch 40
Training Accuracy:0.9122807017543859
Validation Accuracy:1.0
Epoch 60
Training Accuracy:0.8771929824561403
Validation Accuracy:1.0
Epoch 80
Training Accuracy:0.9122807017543859
Validation Accuracy:1.0
Epoch 100
Training Accuracy:0.956140350877193
Validation Accuracy:0.9230769230769231
[ ]:
4
Perceptron
1
def step_func(z):
    """Heaviside step activation for the perceptron: 1.0 if z > 0, else 0.0."""
    return 1.0 if (z > 0) else 0.0
# X --> Inputs.
# y --> labels/target.
# lr --> learning rate.
# epochs --> Number of iterations.
2
# Empty list to store how many examples were
# misclassified at every iteration.
n_miss_list = []
# Training.
for epoch in range(epochs):
# Calculating prediction/hypothesis.
y_hat = step_func(np.dot(x_i.T, theta))
# Incrementing by 1.
n_miss += 1
# X --> Inputs
# theta --> parameters
# Plotting
fig = plt.figure(figsize=(10,8))
3
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "r^")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs")
plt.xlabel("feature 1")
plt.ylabel("feature 2")
plt.title('Perceptron Algorithm')
plt.plot(x1, x2, 'y-')
[ ]:
4
LSTM-Attention model
Mounted at /content/gdrive
/content/gdrive/My Drive/Attention lstm implementation
↪'Amount']
1
[4]: X = tr_data.drop(['Class'], axis = 'columns')
Label_Data = tr_data['Class']
[9]: X_train.shape
[9]: (398041, 9)
[10]: X_test.shape
[10]: (170589, 9)
2
## Reshape input to be 3D [samples, timesteps, features] (format required by LSTM)
# Baseline LSTM classifier: two stacked LSTM layers over a single
# timestep of 9 features, ending in a sigmoid unit for binary output.
inputs = Input((1, 9))
x1 = LSTM(50, dropout=0.3, recurrent_dropout=0.2, return_sequences=True)(inputs)
x2 = LSTM(50, dropout=0.3, recurrent_dropout=0.2)(x1)
outputs = Dense(1, activation='sigmoid')(x2)
model = Model(inputs, outputs)
Epoch 1/100
20/20 [==============================] - 14s 285ms/step - loss: 0.6664 -
accuracy: 0.8139 - val_loss: 0.6260 - val_accuracy: 0.8837
Epoch 2/100
20/20 [==============================] - 5s 240ms/step - loss: 0.5785 -
accuracy: 0.8820 - val_loss: 0.5035 - val_accuracy: 0.8910
Epoch 3/100
20/20 [==============================] - 5s 229ms/step - loss: 0.4487 -
accuracy: 0.8851 - val_loss: 0.3711 - val_accuracy: 0.8979
Epoch 4/100
20/20 [==============================] - 5s 235ms/step - loss: 0.3407 -
accuracy: 0.8953 - val_loss: 0.2905 - val_accuracy: 0.9063
Epoch 5/100
20/20 [==============================] - 5s 234ms/step - loss: 0.2856 -
accuracy: 0.9056 - val_loss: 0.2525 - val_accuracy: 0.9105
Epoch 6/100
20/20 [==============================] - 5s 233ms/step - loss: 0.2570 -
accuracy: 0.9118 - val_loss: 0.2271 - val_accuracy: 0.9119
Epoch 7/100
20/20 [==============================] - 5s 231ms/step - loss: 0.2401 -
accuracy: 0.9155 - val_loss: 0.2101 - val_accuracy: 0.9218
Epoch 8/100
20/20 [==============================] - 4s 225ms/step - loss: 0.2293 -
accuracy: 0.9180 - val_loss: 0.1987 - val_accuracy: 0.9268
Epoch 9/100
3
20/20 [==============================] - 4s 225ms/step - loss: 0.2226 -
accuracy: 0.9195 - val_loss: 0.1914 - val_accuracy: 0.9286
Epoch 10/100
20/20 [==============================] - 5s 229ms/step - loss: 0.2184 -
accuracy: 0.9197 - val_loss: 0.1872 - val_accuracy: 0.9296
Epoch 11/100
20/20 [==============================] - 5s 234ms/step - loss: 0.2146 -
accuracy: 0.9204 - val_loss: 0.1845 - val_accuracy: 0.9305
Epoch 12/100
20/20 [==============================] - 5s 226ms/step - loss: 0.2123 -
accuracy: 0.9208 - val_loss: 0.1825 - val_accuracy: 0.9309
Epoch 13/100
20/20 [==============================] - 4s 221ms/step - loss: 0.2094 -
accuracy: 0.9221 - val_loss: 0.1806 - val_accuracy: 0.9316
Epoch 14/100
20/20 [==============================] - 4s 222ms/step - loss: 0.2067 -
accuracy: 0.9227 - val_loss: 0.1796 - val_accuracy: 0.9318
Epoch 15/100
20/20 [==============================] - 5s 226ms/step - loss: 0.2053 -
accuracy: 0.9229 - val_loss: 0.1777 - val_accuracy: 0.9331
Epoch 16/100
20/20 [==============================] - 4s 223ms/step - loss: 0.2033 -
accuracy: 0.9238 - val_loss: 0.1765 - val_accuracy: 0.9338
Epoch 17/100
20/20 [==============================] - 4s 224ms/step - loss: 0.2012 -
accuracy: 0.9247 - val_loss: 0.1753 - val_accuracy: 0.9345
Epoch 18/100
20/20 [==============================] - 5s 230ms/step - loss: 0.1987 -
accuracy: 0.9254 - val_loss: 0.1738 - val_accuracy: 0.9349
Epoch 19/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1981 -
accuracy: 0.9255 - val_loss: 0.1725 - val_accuracy: 0.9352
Epoch 20/100
20/20 [==============================] - 5s 240ms/step - loss: 0.1961 -
accuracy: 0.9265 - val_loss: 0.1717 - val_accuracy: 0.9354
Epoch 21/100
20/20 [==============================] - 5s 240ms/step - loss: 0.1946 -
accuracy: 0.9268 - val_loss: 0.1709 - val_accuracy: 0.9355
Epoch 22/100
20/20 [==============================] - 5s 237ms/step - loss: 0.1936 -
accuracy: 0.9274 - val_loss: 0.1697 - val_accuracy: 0.9358
Epoch 23/100
20/20 [==============================] - 5s 239ms/step - loss: 0.1930 -
accuracy: 0.9275 - val_loss: 0.1690 - val_accuracy: 0.9360
Epoch 24/100
20/20 [==============================] - 5s 246ms/step - loss: 0.1916 -
accuracy: 0.9283 - val_loss: 0.1687 - val_accuracy: 0.9361
Epoch 25/100
4
20/20 [==============================] - 5s 240ms/step - loss: 0.1912 -
accuracy: 0.9286 - val_loss: 0.1683 - val_accuracy: 0.9361
Epoch 26/100
20/20 [==============================] - 5s 238ms/step - loss: 0.1898 -
accuracy: 0.9289 - val_loss: 0.1676 - val_accuracy: 0.9363
Epoch 27/100
20/20 [==============================] - 5s 229ms/step - loss: 0.1899 -
accuracy: 0.9291 - val_loss: 0.1673 - val_accuracy: 0.9364
Epoch 28/100
20/20 [==============================] - 5s 234ms/step - loss: 0.1887 -
accuracy: 0.9296 - val_loss: 0.1671 - val_accuracy: 0.9365
Epoch 29/100
20/20 [==============================] - 4s 223ms/step - loss: 0.1887 -
accuracy: 0.9298 - val_loss: 0.1670 - val_accuracy: 0.9366
Epoch 30/100
20/20 [==============================] - 4s 223ms/step - loss: 0.1880 -
accuracy: 0.9301 - val_loss: 0.1663 - val_accuracy: 0.9367
Epoch 31/100
20/20 [==============================] - 4s 223ms/step - loss: 0.1874 -
accuracy: 0.9301 - val_loss: 0.1661 - val_accuracy: 0.9368
Epoch 32/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1868 -
accuracy: 0.9302 - val_loss: 0.1659 - val_accuracy: 0.9368
Epoch 33/100
20/20 [==============================] - 5s 226ms/step - loss: 0.1865 -
accuracy: 0.9303 - val_loss: 0.1657 - val_accuracy: 0.9368
Epoch 34/100
20/20 [==============================] - 5s 234ms/step - loss: 0.1865 -
accuracy: 0.9305 - val_loss: 0.1653 - val_accuracy: 0.9369
Epoch 35/100
20/20 [==============================] - 5s 230ms/step - loss: 0.1857 -
accuracy: 0.9304 - val_loss: 0.1652 - val_accuracy: 0.9369
Epoch 36/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1858 -
accuracy: 0.9304 - val_loss: 0.1652 - val_accuracy: 0.9368
Epoch 37/100
20/20 [==============================] - 5s 234ms/step - loss: 0.1858 -
accuracy: 0.9304 - val_loss: 0.1650 - val_accuracy: 0.9369
Epoch 38/100
20/20 [==============================] - 5s 230ms/step - loss: 0.1848 -
accuracy: 0.9310 - val_loss: 0.1647 - val_accuracy: 0.9370
Epoch 39/100
20/20 [==============================] - 5s 233ms/step - loss: 0.1850 -
accuracy: 0.9308 - val_loss: 0.1646 - val_accuracy: 0.9370
Epoch 40/100
20/20 [==============================] - 5s 240ms/step - loss: 0.1838 -
accuracy: 0.9313 - val_loss: 0.1642 - val_accuracy: 0.9370
Epoch 41/100
5
20/20 [==============================] - 5s 229ms/step - loss: 0.1840 -
accuracy: 0.9312 - val_loss: 0.1643 - val_accuracy: 0.9370
Epoch 42/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1839 -
accuracy: 0.9312 - val_loss: 0.1647 - val_accuracy: 0.9370
Epoch 43/100
20/20 [==============================] - 4s 223ms/step - loss: 0.1836 -
accuracy: 0.9312 - val_loss: 0.1639 - val_accuracy: 0.9370
Epoch 44/100
20/20 [==============================] - 5s 236ms/step - loss: 0.1834 -
accuracy: 0.9311 - val_loss: 0.1639 - val_accuracy: 0.9370
Epoch 45/100
20/20 [==============================] - 5s 229ms/step - loss: 0.1833 -
accuracy: 0.9310 - val_loss: 0.1638 - val_accuracy: 0.9370
Epoch 46/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1831 -
accuracy: 0.9313 - val_loss: 0.1633 - val_accuracy: 0.9371
Epoch 47/100
20/20 [==============================] - 5s 229ms/step - loss: 0.1826 -
accuracy: 0.9315 - val_loss: 0.1632 - val_accuracy: 0.9371
Epoch 48/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1829 -
accuracy: 0.9314 - val_loss: 0.1631 - val_accuracy: 0.9371
Epoch 49/100
20/20 [==============================] - 4s 223ms/step - loss: 0.1831 -
accuracy: 0.9314 - val_loss: 0.1633 - val_accuracy: 0.9371
Epoch 50/100
20/20 [==============================] - 5s 225ms/step - loss: 0.1827 -
accuracy: 0.9315 - val_loss: 0.1627 - val_accuracy: 0.9372
Epoch 51/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1817 -
accuracy: 0.9316 - val_loss: 0.1625 - val_accuracy: 0.9372
Epoch 52/100
20/20 [==============================] - 5s 230ms/step - loss: 0.1817 -
accuracy: 0.9317 - val_loss: 0.1619 - val_accuracy: 0.9373
Epoch 53/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1818 -
accuracy: 0.9314 - val_loss: 0.1622 - val_accuracy: 0.9372
Epoch 54/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1813 -
accuracy: 0.9316 - val_loss: 0.1621 - val_accuracy: 0.9372
Epoch 55/100
20/20 [==============================] - 5s 231ms/step - loss: 0.1814 -
accuracy: 0.9315 - val_loss: 0.1621 - val_accuracy: 0.9372
Epoch 56/100
20/20 [==============================] - 4s 225ms/step - loss: 0.1812 -
accuracy: 0.9319 - val_loss: 0.1619 - val_accuracy: 0.9373
Epoch 57/100
6
20/20 [==============================] - 5s 226ms/step - loss: 0.1805 -
accuracy: 0.9318 - val_loss: 0.1622 - val_accuracy: 0.9372
Epoch 58/100
20/20 [==============================] - 5s 230ms/step - loss: 0.1808 -
accuracy: 0.9316 - val_loss: 0.1613 - val_accuracy: 0.9373
Epoch 59/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1806 -
accuracy: 0.9316 - val_loss: 0.1614 - val_accuracy: 0.9373
Epoch 60/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1803 -
accuracy: 0.9319 - val_loss: 0.1611 - val_accuracy: 0.9373
Epoch 61/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1801 -
accuracy: 0.9321 - val_loss: 0.1611 - val_accuracy: 0.9373
Epoch 62/100
20/20 [==============================] - 5s 234ms/step - loss: 0.1801 -
accuracy: 0.9317 - val_loss: 0.1615 - val_accuracy: 0.9373
Epoch 63/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1797 -
accuracy: 0.9319 - val_loss: 0.1607 - val_accuracy: 0.9373
Epoch 64/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1796 -
accuracy: 0.9321 - val_loss: 0.1604 - val_accuracy: 0.9373
Epoch 65/100
20/20 [==============================] - 4s 226ms/step - loss: 0.1794 -
accuracy: 0.9321 - val_loss: 0.1608 - val_accuracy: 0.9373
Epoch 66/100
20/20 [==============================] - 5s 226ms/step - loss: 0.1797 -
accuracy: 0.9317 - val_loss: 0.1608 - val_accuracy: 0.9373
Epoch 67/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1793 -
accuracy: 0.9319 - val_loss: 0.1608 - val_accuracy: 0.9374
Epoch 68/100
20/20 [==============================] - 4s 226ms/step - loss: 0.1786 -
accuracy: 0.9323 - val_loss: 0.1602 - val_accuracy: 0.9374
Epoch 69/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1789 -
accuracy: 0.9324 - val_loss: 0.1602 - val_accuracy: 0.9374
Epoch 70/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1789 -
accuracy: 0.9318 - val_loss: 0.1599 - val_accuracy: 0.9374
Epoch 71/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1789 -
accuracy: 0.9320 - val_loss: 0.1602 - val_accuracy: 0.9374
Epoch 72/100
20/20 [==============================] - 5s 229ms/step - loss: 0.1784 -
accuracy: 0.9322 - val_loss: 0.1597 - val_accuracy: 0.9374
Epoch 73/100
7
20/20 [==============================] - 4s 226ms/step - loss: 0.1781 -
accuracy: 0.9322 - val_loss: 0.1598 - val_accuracy: 0.9374
Epoch 74/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1782 -
accuracy: 0.9321 - val_loss: 0.1600 - val_accuracy: 0.9374
Epoch 75/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1781 -
accuracy: 0.9321 - val_loss: 0.1596 - val_accuracy: 0.9374
Epoch 76/100
20/20 [==============================] - 4s 225ms/step - loss: 0.1780 -
accuracy: 0.9320 - val_loss: 0.1596 - val_accuracy: 0.9374
Epoch 77/100
20/20 [==============================] - 4s 219ms/step - loss: 0.1772 -
accuracy: 0.9324 - val_loss: 0.1591 - val_accuracy: 0.9373
Epoch 78/100
20/20 [==============================] - 4s 217ms/step - loss: 0.1774 -
accuracy: 0.9321 - val_loss: 0.1591 - val_accuracy: 0.9374
Epoch 79/100
20/20 [==============================] - 4s 220ms/step - loss: 0.1773 -
accuracy: 0.9322 - val_loss: 0.1589 - val_accuracy: 0.9374
Epoch 80/100
20/20 [==============================] - 5s 229ms/step - loss: 0.1771 -
accuracy: 0.9324 - val_loss: 0.1586 - val_accuracy: 0.9374
Epoch 81/100
20/20 [==============================] - 5s 240ms/step - loss: 0.1771 -
accuracy: 0.9324 - val_loss: 0.1588 - val_accuracy: 0.9374
Epoch 82/100
20/20 [==============================] - 5s 236ms/step - loss: 0.1771 -
accuracy: 0.9321 - val_loss: 0.1587 - val_accuracy: 0.9374
Epoch 83/100
20/20 [==============================] - 5s 241ms/step - loss: 0.1764 -
accuracy: 0.9324 - val_loss: 0.1585 - val_accuracy: 0.9375
Epoch 84/100
20/20 [==============================] - 5s 233ms/step - loss: 0.1761 -
accuracy: 0.9324 - val_loss: 0.1586 - val_accuracy: 0.9375
Epoch 85/100
20/20 [==============================] - 5s 233ms/step - loss: 0.1765 -
accuracy: 0.9322 - val_loss: 0.1585 - val_accuracy: 0.9374
Epoch 86/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1759 -
accuracy: 0.9326 - val_loss: 0.1582 - val_accuracy: 0.9375
Epoch 87/100
20/20 [==============================] - 5s 235ms/step - loss: 0.1751 -
accuracy: 0.9328 - val_loss: 0.1575 - val_accuracy: 0.9375
Epoch 88/100
20/20 [==============================] - 5s 226ms/step - loss: 0.1753 -
accuracy: 0.9324 - val_loss: 0.1581 - val_accuracy: 0.9375
Epoch 89/100
8
20/20 [==============================] - 5s 233ms/step - loss: 0.1746 -
accuracy: 0.9329 - val_loss: 0.1578 - val_accuracy: 0.9375
Epoch 90/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1747 -
accuracy: 0.9323 - val_loss: 0.1578 - val_accuracy: 0.9375
Epoch 91/100
20/20 [==============================] - 5s 228ms/step - loss: 0.1749 -
accuracy: 0.9326 - val_loss: 0.1585 - val_accuracy: 0.9376
Epoch 92/100
20/20 [==============================] - 5s 237ms/step - loss: 0.1746 -
accuracy: 0.9329 - val_loss: 0.1578 - val_accuracy: 0.9376
Epoch 93/100
20/20 [==============================] - 5s 233ms/step - loss: 0.1742 -
accuracy: 0.9330 - val_loss: 0.1582 - val_accuracy: 0.9375
Epoch 94/100
20/20 [==============================] - 5s 235ms/step - loss: 0.1740 -
accuracy: 0.9330 - val_loss: 0.1588 - val_accuracy: 0.9376
Epoch 95/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1731 -
accuracy: 0.9334 - val_loss: 0.1585 - val_accuracy: 0.9377
Epoch 96/100
20/20 [==============================] - 5s 236ms/step - loss: 0.1732 -
accuracy: 0.9331 - val_loss: 0.1587 - val_accuracy: 0.9377
Epoch 97/100
20/20 [==============================] - 5s 232ms/step - loss: 0.1727 -
accuracy: 0.9333 - val_loss: 0.1590 - val_accuracy: 0.9378
Epoch 98/100
20/20 [==============================] - 4s 224ms/step - loss: 0.1724 -
accuracy: 0.9337 - val_loss: 0.1598 - val_accuracy: 0.9377
Epoch 99/100
20/20 [==============================] - 5s 227ms/step - loss: 0.1722 -
accuracy: 0.9335 - val_loss: 0.1597 - val_accuracy: 0.9376
Epoch 100/100
20/20 [==============================] - 5s 242ms/step - loss: 0.1715 -
accuracy: 0.9339 - val_loss: 0.1611 - val_accuracy: 0.9375
# load model
model = load_model('Save_Model.h5')
9
# summarize model.
model.summary()
10
[ ]: # predict probabilities for test set
yhat_probs = model.predict(val_LSTM_X, verbose=0)
# reduce to 1d array
yhat_probs = yhat_probs[:, 0]
Accuracy: 0.937135
Precision: 0.987155
Recall: 0.886014
[ ]: %matplotlib inline
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
[ ]: cm = confusion_matrix(y_true=val_LSTM_y, y_pred=yhat_probs)
if normalize:
11
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
labels = ['Normal','Fraud']
12
class attention(Layer):
    """Additive attention over LSTM timestep outputs.

    Learns a weight vector W of shape (features, 1) and a bias b of shape
    (timesteps, 1); scores each timestep with tanh(x.W + b), softmaxes the
    scores over time, and returns the attention-weighted sum of timesteps,
    collapsing (batch, timesteps, features) to (batch, features).
    """

    def __init__(self, **kwargs):
        super(attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape: (batch, timesteps, features)
        self.W = self.add_weight(name="att_weight",
                                 shape=(input_shape[-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="att_bias",
                                 shape=(input_shape[1], 1),
                                 initializer="zeros")
        super(attention, self).build(input_shape)

    def call(self, x):
        # et: (batch, timesteps) unnormalized attention scores
        et = K.squeeze(K.tanh(K.dot(x, self.W) + self.b), axis=-1)
        at = K.softmax(et)
        at = K.expand_dims(at, axis=-1)
        output = x * at
        # weighted sum over the time axis -> (batch, features)
        return K.sum(output, axis=1)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])

    def get_config(self):
        # No extra constructor args, so the base config suffices.
        return super(attention, self).get_config()
# Attention-augmented LSTM classifier: both LSTM layers return sequences so
# the attention layer can pool over timesteps before the sigmoid output.
inputs1 = Input((1, 9))
att_in = LSTM(50, return_sequences=True, dropout=0.3, recurrent_dropout=0.2)(inputs1)
att_in_1 = LSTM(50, return_sequences=True, dropout=0.3, recurrent_dropout=0.2)(att_in)
att_out = attention()(att_in_1)
outputs1 = Dense(1, activation='sigmoid', trainable=True)(att_out)
model1 = Model(inputs1, outputs1)

model1.compile(loss='binary_crossentropy', optimizer='adam',
               metrics=['accuracy'])

history1 = model1.fit(train_LSTM_X, train_LSTM_y, epochs=100, batch_size=30000,
                      validation_data=(val_LSTM_X, val_LSTM_y))
13
# load model
model1 = load_model('Save_Model_Attention.h5')
# summarize model.
model1.summary()
Accuracy: 0.9672
Precision: 0.9885
Recall: 0.9191
14
15