SOURCE CODE
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import ResNet50, EfficientNetB0, MobileNetV2
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from google.colab import drive
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Mount Google Drive
drive.mount('/content/drive')
# Define dataset path
dataset_path = "/content/drive/My Drive/Colab Notebooks/Phase 2"
# Define categories
categories = ["A", "B", "T", "H"]
# Image size for model
IMG_SIZE = 224 # ResNet50, EfficientNet, and MobileNetV2 require 224x224
# Function to preprocess images
def preprocess_image(img_path):
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    # Convert grayscale to 3-channel RGB for GrabCut
    img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # Apply Bilateral Filtering
    img_rgb = cv2.bilateralFilter(img_rgb, 9, 75, 75)
    # Apply GrabCut
    mask = np.zeros(img_rgb.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (10, 10, img_rgb.shape[1] - 10, img_rgb.shape[0] - 10)
    cv2.grabCut(img_rgb, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img_rgb = img_rgb * mask2[:, :, np.newaxis]
    # Convert back to grayscale for CLAHE
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    # Apply CLAHE
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img_gray = clahe.apply(img_gray)
    # Convert back to 3-channel for EfficientNet & MobileNet
    img_final = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
    # Normalize
    img_final = img_final / 255.0
    return img_final
# Load and preprocess data
X, y = [], []
for label, category in enumerate(categories):
    folder_path = os.path.join(dataset_path, category)
    for img_name in os.listdir(folder_path):
        img_path = os.path.join(folder_path, img_name)
        processed_img = preprocess_image(img_path)
        X.append(processed_img)
        y.append(label)
X = np.array(X)
y = np.array(y)
# Convert labels to categorical
y = to_categorical(y, num_classes=len(categories))
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
# Load ResNet50 for Feature Extraction
base_model = ResNet50(weights='imagenet', include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3))
base_model.trainable = False # Freeze ResNet50 layers
# Feature extraction
x = base_model.output
x = GlobalAveragePooling2D()(x)
feature_extractor = Model(inputs=base_model.input, outputs=x)
# Extract features
X_train_features = feature_extractor.predict(X_train)
X_test_features = feature_extractor.predict(X_test)
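# Note: with include_top=False followed by global average pooling, each image is
# reduced to a 2048-dimensional ResNet50 feature vector; these vectors are the
# inputs that the dense ensemble heads defined below are trained on.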
# Define EfficientNet Model
efficientnet_model = EfficientNetB0(weights='imagenet', include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3))
efficientnet_model.trainable = False
# Define MobileNetV2 Model
mobilenet_model = MobileNetV2(weights='imagenet', include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3))
mobilenet_model.trainable = False
# Function to build the ensemble model
def build_ensemble():
    inputs = tf.keras.Input(shape=(X_train_features.shape[1],))
    # EfficientNet branch
    eff_x = Dense(128, activation='relu')(inputs)
    eff_x = Dropout(0.3)(eff_x)
    eff_x = Dense(len(categories), activation='softmax')(eff_x)
    # MobileNet branch
    mob_x = Dense(128, activation='relu')(inputs)
    mob_x = Dropout(0.3)(mob_x)
    mob_x = Dense(len(categories), activation='softmax')(mob_x)
    # Combine predictions
    outputs = tf.keras.layers.Average()([eff_x, mob_x])
    model = Model(inputs=inputs, outputs=outputs)
    return model
# Create ensemble model
ensemble_model = build_ensemble()
# Compile model
ensemble_model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# Train model
history = ensemble_model.fit(X_train_features, y_train, epochs=20,
validation_data=(X_test_features, y_test), batch_size=8)
# Evaluate the model to get test loss and accuracy
test_loss, test_acc = ensemble_model.evaluate(X_test_features, y_test)
print(f"Test Accuracy: {str(int(test_acc * 100)).replace(str(int(test_acc * 100)}%")
# Install required libraries
!pip install scikit-fuzzy
# Imports
from google.colab import drive
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB0, MobileNetV2, ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten, Dense, Concatenate, Input
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from google.colab.patches import cv2_imshow
import os
import skfuzzy as fuzz
from skfuzzy import control as ctrl
from sklearn.linear_model import LinearRegression
# Mount Google Drive
drive.mount('/content/drive')
# Class labels
class_labels = ['Aphids', 'Bacterial Blight', 'Leaf Spot', 'Healthy']
# Pesticide mapping dictionary
pesticide_mapping = {
    'Aphids': {
        'mild': ('Neem Oil', '2 ml/liter', 'Apply once every 10 days'),
        'moderate': ('Imidacloprid 17.8% SL', '3 ml/liter', 'Apply once every 7 days'),
        'severe': ('Thiamethoxam 25% WG', '4 ml/liter', 'Apply once every 5 days')
    },
    'Bacterial Blight': {
        'mild': ('Copper Oxychloride', '2.5 g/liter', 'Apply once every 12 days'),
        'moderate': ('Streptomycin + Tetracycline', '3 g/liter', 'Apply once every 7 days'),
        'severe': ('Kasugamycin', '4 g/liter', 'Apply once every 5 days')
    },
    'Leaf Spot': {
        'mild': ('Mancozeb 75% WP', '2.5 g/liter', 'Apply once every 10 days'),
        'moderate': ('Chlorothalonil', '3 g/liter', 'Apply once every 7 days'),
        'severe': ('Azoxystrobin', '4 g/liter', 'Apply once every 5 days')
    },
    'Healthy': {
        'mild': ('No pesticide needed', '-', '-'),
        'moderate': ('No pesticide needed', '-', '-'),
        'severe': ('No pesticide needed', '-', '-')
    }
}
# Function to preprocess input image
def preprocess_image(image_path):
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    image = np.expand_dims(image, axis=0) / 255.0  # Normalize
    return image
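# Illustrative check (the file path below is an assumption, not from the listing):
# sample = preprocess_image('/content/drive/My Drive/sample_leaf.jpg')
# print(sample.shape)  # -> (1, 224, 224, 3), the batch shape the models expect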
# Load Pretrained Models
input_layer = Input(shape=(224, 224, 3))
resnet_model = ResNet50(weights="imagenet", include_top=False,
input_tensor=input_layer)
efficientnet_model = EfficientNetB0(weights="imagenet", include_top=False,
input_tensor=input_layer)
mobilenet_model = MobileNetV2(weights="imagenet", include_top=False,
input_tensor=input_layer)
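# All three backbones are built on the same shared 224x224x3 input tensor, so
# their outputs can later be merged into a single Keras graph in build_model().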
# Feature extraction
def extract_features(image):
    resnet_features = resnet_model.predict(image)
    efficientnet_features = efficientnet_model.predict(image)
    mobilenet_features = mobilenet_model.predict(image)
    return resnet_features, efficientnet_features, mobilenet_features
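# extract_features is not called elsewhere in this listing; an illustrative use
# with the preprocessing above (assumed, not from the original) would be:
# r_feat, e_feat, m_feat = extract_features(preprocess_image(image_path))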
# Build Ensemble Model
def build_model():
    resnet_flatten = Flatten()(resnet_model.output)
    efficientnet_flatten = Flatten()(efficientnet_model.output)
    mobilenet_flatten = Flatten()(mobilenet_model.output)
    concatenated_features = Concatenate()([resnet_flatten, efficientnet_flatten,
                                           mobilenet_flatten])
    dense_layer = Dense(256, activation='relu')(concatenated_features)
    output_layer = Dense(4, activation='softmax')(dense_layer)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
# Fuzzy Logic Setup
severity_input = ctrl.Antecedent(np.arange(0, 11, 1), 'severity')
severity_level = ctrl.Consequent(np.arange(0, 11, 1), 'level')
severity_input