MobileNetV2
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# Define constants
IMAGE_SIZE = 224
BATCH_SIZE = 64
NUM_CLASSES = 5 # Update to match the number of classes in your dataset
EPOCHS = 50
# Define data directories
base_dir = os.path.join(os.getcwd(), 'The Gems Atlas')
train_dir = os.path.join(base_dir, 'train')
valid_dir = os.path.join(base_dir, 'valid')
test_dir = os.path.join(base_dir, 'test')
# Define data generators for train, validation, and test sets
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(
    valid_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=False
)
# Load pre-trained MobileNetV2 model
base_model = MobileNetV2(
    weights='imagenet', include_top=False, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
# Freeze the layers in the base model
for layer in base_model.layers:
    layer.trainable = False
# Build the model
model = Sequential([
    base_model,
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(NUM_CLASSES, activation='softmax')
])
# Compile the model
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(
    train_generator,
    epochs=EPOCHS,
    validation_data=validation_generator)
# Evaluate the model on the test data
loss, accuracy = model.evaluate(test_generator)
print('Test loss:', loss)
print('Test accuracy:', accuracy)
# Make predictions on the test data
y_pred = model.predict(test_generator)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = test_generator.classes
# Print the confusion matrix
print("Confusion Matrix:")
print(confusion_matrix(y_true, y_pred_classes))
# Print the classification report
target_names = list(test_generator.class_indices.keys())
print("Classification Report:")
print(classification_report(y_true, y_pred_classes, target_names=target_names))
# Calculate precision, recall, and F1 score
report = classification_report(y_true, y_pred_classes, target_names=target_names, output_dict=True)
precision = report['weighted avg']['precision']
recall = report['weighted avg']['recall']
f1_score = report['weighted avg']['f1-score']
print('Precision:', precision)
print('Recall:', recall)
print('F1 score:', f1_score)
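The ModelCheckpoint callback is imported above but never wired into training. A minimal sketch of how it could be used is shown below; the file name 'mobilenetv2_best.keras' is an assumed placeholder, not part of the original pipeline.
# Sketch only: save the weights with the best validation accuracy seen during training.
# 'mobilenetv2_best.keras' is an assumed placeholder file name.
checkpoint = ModelCheckpoint(
    'mobilenetv2_best.keras',
    monitor='val_accuracy',
    save_best_only=True,
    verbose=1
)
history = model.fit(
    train_generator,
    epochs=EPOCHS,
    validation_data=validation_generator,
    callbacks=[checkpoint]
)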
Inception V3
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix
# Define constants
IMAGE_SIZE = 224
BATCH_SIZE = 64
NUM_CLASSES = 5 # Update this based on your actual number of classes
EPOCHS = 50
# Define data directories
train_dir = os.path.join(os.getcwd(), 'The Gems Atlas/train')  # Update path as needed
valid_dir = os.path.join(os.getcwd(), 'The Gems Atlas/valid')  # Update path as needed
test_dir = os.path.join(os.getcwd(), 'The Gems Atlas/test')  # Update path as needed
# Define data generators
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=True  # Shuffle training data
)
valid_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = valid_datagen.flow_from_directory(
    valid_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical'
)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=False
)
# Load pre-trained Inception V3 model
base_model = InceptionV3(weights='imagenet', include_top=False,
                         input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
# Freeze the layers in the base model
for layer in base_model.layers:
    layer.trainable = False
# Build the model
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))  # Change NUM_CLASSES to match the dataset
# Compile the model
model.compile(optimizer=Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(
    train_generator,
    validation_data=validation_generator,
    epochs=EPOCHS
)
# Evaluate the model on the test data
loss, accuracy = model.evaluate(test_generator)
print('Test loss:', loss)
print('Test accuracy:', accuracy)
# Predictions
y_pred = model.predict(test_generator)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = test_generator.classes
# Print the confusion matrix
print(confusion_matrix(y_true, y_pred_classes))
# Print the classification report
target_names = list(test_generator.class_indices.keys())
print(classification_report(y_true, y_pred_classes, target_names=target_names))
# Calculate and print overall accuracy
accuracy = np.mean(y_pred_classes == y_true)
print('Accuracy:', accuracy)
# Calculate precision, recall, and F1 score
report = classification_report(y_true, y_pred_classes, target_names=target_names, output_dict=True)
precision = report['weighted avg']['precision']
recall = report['weighted avg']['recall']
f1_score = report['weighted avg']['f1-score']
print('Precision:', precision)
print('Recall:', recall)
print('F1 score:', f1_score)
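Once either model is trained, a single image can be classified and the predicted index mapped back to a gem class name through the generator's class_indices dictionary. The sketch below assumes a trained model and a hypothetical file path 'sample.jpg', and reuses the same 1./255 rescaling applied by the generators.
# Sketch only: classify one image with the trained model.
# 'sample.jpg' is a hypothetical path; any image from the test set would work.
class_names = {v: k for k, v in test_generator.class_indices.items()}
img = tf.keras.utils.load_img('sample.jpg', target_size=(IMAGE_SIZE, IMAGE_SIZE))
x = tf.keras.utils.img_to_array(img) / 255.0  # match the generators' rescale=1./255
x = np.expand_dims(x, axis=0)  # add the batch dimension
probs = model.predict(x)[0]
print('Predicted class:', class_names[int(np.argmax(probs))])
print('Confidence:', float(np.max(probs)))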