This notebook describes a convolutional neural network model that classifies hand gestures from images. It loads image data of 10 different hand gestures, preprocesses and splits the data, defines a CNN with convolutional and dense layers, trains the model for 7 epochs, and evaluates it on the test set, reaching over 99% accuracy.

Content

The dataset consists of 10 different hand gestures performed by 10 different subjects (5 men and 5 women).

In [1]: from tensorflow import keras

import matplotlib.pyplot as plt  # for plotting
import os    # operating system dependent functionality (paths, directory listing)
import cv2   # image handling library (OpenCV)
import numpy as np

# Import of the Keras layers used in our convolutional network
from tensorflow.keras.layers import Conv2D, Activation, MaxPool2D, Dense, Flatten, Dropout

In [2]: CATEGORIES = ['01_palm', '02_l', '03_fist', '04_fist_moved', '05_thumb',
                      '06_index', '07_ok', '08_palm_moved', '09_c', '10_down']
IMG_SIZE = 50

# path to the dataset (raw string so the backslashes are not treated as escape sequences)
data_path = r"D:\Jupyter\myProject\dataset2\data\leapGestRecog\leapGestRecog"
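
Before loading, it can help to confirm that the layout under data_path matches what the loop in the next cell expects: presumably one folder per subject, each holding the ten gesture sub-folders named as in CATEGORIES. A minimal sanity-check sketch (not part of the original notebook), reusing os and data_path from the cells above:

# Sanity check (assumption about the leapGestRecog layout): one folder per
# subject under data_path, each containing the ten gesture sub-folders.
for subject in sorted(os.listdir(data_path)):
    gesture_dirs = sorted(os.listdir(os.path.join(data_path, subject)))
    print(subject, len(gesture_dirs), gesture_dirs)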

The Data
In [3]: # Loading the images and their class (0 - 9)
image_data = []
for dr in os.listdir(data_path):
    for category in CATEGORIES:
        class_index = CATEGORIES.index(category)
        path = os.path.join(data_path, dr, category)
        for img in os.listdir(path):
            try:
                img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                image_data.append([cv2.resize(img_arr, (IMG_SIZE, IMG_SIZE)), class_index])
            except Exception as e:
                pass
image_data[0]

Out[3]: [array([[5, 4, 4, ..., 3, 4, 2],
                [5, 4, 5, ..., 3, 3, 3],
                [4, 5, 4, ..., 4, 5, 3],
                ...,
                [4, 5, 5, ..., 5, 5, 5],
                [5, 5, 6, ..., 5, 7, 4],
                [4, 7, 5, ..., 5, 4, 4]], dtype=uint8),
         0]

In [4]: # shuffle the input data
import random
random.shuffle(image_data)

In [5]: input_data = []
label = []
for X, y in image_data:
    input_data.append(X)
    label.append(y)

In [6]: label[:10]

Out[6]: [5, 6, 5, 0, 5, 7, 0, 8, 7, 5]

In [7]: plt.figure(1, figsize=(10, 10))
for i in range(1, 10):
    plt.subplot(3, 3, i)
    plt.imshow(image_data[i][0], cmap='hot')
    plt.xticks([])
    plt.yticks([])
    plt.title(CATEGORIES[label[i]][3:])
plt.show()

In [8]: # Normalizing the data
input_data = np.array(input_data)
label = np.array(label)
input_data = input_data / 255.0
input_data.shape

Out[8]: (20000, 50, 50)


In [9]: # one hot encoding
label = keras.utils.to_categorical(label, num_classes=10, dtype='i1')
label[0]

Out[9]: array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=int8)

In [10]: # reshaping the data
input_data.shape = (-1, IMG_SIZE, IMG_SIZE, 1)

In [11]: # splitting the input data into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(input_data, label, test_size=0.3, random_state=0)

The Model
In [12]: model = keras.models.Sequential()

model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 1)))
model.add(Activation('relu'))

model.add(Conv2D(filters=32, kernel_size=(3, 3)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Conv2D(filters=64, kernel_size=(3, 3)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
In [13]: model.summary()

Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 48, 48, 32) 320
_________________________________________________________________
activation (Activation) (None, 48, 48, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 46, 46, 32) 9248
_________________________________________________________________
activation_1 (Activation) (None, 46, 46, 32) 0
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 23, 23, 32) 0
_________________________________________________________________
dropout (Dropout) (None, 23, 23, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 21, 21, 64) 18496
_________________________________________________________________
activation_2 (Activation) (None, 21, 21, 64) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 10, 10, 64) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 10, 10, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 6400) 0
_________________________________________________________________
dense (Dense) (None, 256) 1638656
_________________________________________________________________
dense_1 (Dense) (None, 10) 2570
=================================================================
Total params: 1,669,290
Trainable params: 1,669,290
Non-trainable params: 0
_________________________________________________________________
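
The parameter counts in the summary follow from kernel_height × kernel_width × input_channels × filters + filters for each Conv2D layer and inputs × units + units for each Dense layer. A quick check of the arithmetic (not part of the original notebook):

# Reproduce the parameter counts reported by model.summary()
conv2d   = 3*3*1*32  + 32    # 320
conv2d_1 = 3*3*32*32 + 32    # 9,248
conv2d_2 = 3*3*32*64 + 64    # 18,496
dense    = 6400*256  + 256   # 1,638,656 (Flatten output is 10*10*64 = 6400)
dense_1  = 256*10    + 10    # 2,570
print(conv2d + conv2d_1 + conv2d_2 + dense + dense_1)   # 1,669,290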
In [57]: from tensorflow.keras.utils import plot_model
plot_model(model, to_file='model.png',
           rankdir='TB',
           expand_nested=False,
           show_shapes=True,
           dpi=80,
           )

Out[57]: [architecture diagram written to model.png]
In [14]: model.fit(X_train, y_train, epochs=7, batch_size=32, validation_data=(X_test, y_test))

Train on 14000 samples, validate on 6000 samples
Epoch 1/7
14000/14000 [==============================] - 59s 4ms/sample - loss: 0.3100 - accuracy: 0.8993 - val_loss: 0.0845 - val_accuracy: 0.9733
Epoch 2/7
14000/14000 [==============================] - 36s 3ms/sample - loss: 0.0187 - accuracy: 0.9949 - val_loss: 0.0021 - val_accuracy: 0.9995
Epoch 3/7
14000/14000 [==============================] - 37s 3ms/sample - loss: 0.0096 - accuracy: 0.9975 - val_loss: 0.0075 - val_accuracy: 0.9990
Epoch 4/7
14000/14000 [==============================] - 36s 3ms/sample - loss: 0.0056 - accuracy: 0.9988 - val_loss: 0.0095 - val_accuracy: 0.9988
Epoch 5/7
14000/14000 [==============================] - 36s 3ms/sample - loss: 0.0039 - accuracy: 0.9988 - val_loss: 0.0044 - val_accuracy: 0.9995
Epoch 6/7
14000/14000 [==============================] - 37s 3ms/sample - loss: 0.0036 - accuracy: 0.9994 - val_loss: 0.0052 - val_accuracy: 0.9993
Epoch 7/7
14000/14000 [==============================] - 38s 3ms/sample - loss: 0.0014 - accuracy: 0.9995 - val_loss: 0.0046 - val_accuracy: 0.9993

Out[14]: <tensorflow.python.keras.callbacks.History at 0x22e15d37608>

In [15]: plt.plot(model.history.history['loss'])
plt.plot(model.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
In [16]: plt.plot(model.history.history['accuracy'])
plt.plot(model.history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()

In [17]: # calculate loss and accuracy on the test data
test_loss, test_accuracy = model.evaluate(X_test, y_test)

print('Test accuracy: {:2.2f}%'.format(test_accuracy*100))

6000/6000 [==============================] - 4s 666us/sample - loss: 0.0046 - accuracy: 0.9993
Test accuracy: 99.93%
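
As a follow-up (not part of the original notebook), a minimal sketch of how the trained model could classify a single new image, assuming it is given the same preprocessing as the training data (grayscale, 50×50 resize, scaling to [0, 1]); img_path is a hypothetical placeholder:

# Hypothetical single-image inference sketch; reuses cv2, np, CATEGORIES,
# IMG_SIZE and the trained `model` from the cells above.
img_path = 'some_gesture_frame.png'    # placeholder path to a gesture image

img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)) / 255.0
img = img.reshape(1, IMG_SIZE, IMG_SIZE, 1)

probs = model.predict(img)             # shape (1, 10), softmax probabilities
print(CATEGORIES[np.argmax(probs)])    # predicted gesture label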
