Research
A Two-Phase Pneumonia Detection and Subclassification Model Using Hybridized EfficientNet and Random Forest
This study explores the application of EfficientNet combined with Random Forest for classifying chest X-ray images to detect and categorize pneumonia. Initially, the model was employed to classify X-ray images as either normal or pneumonia-affected, achieving an accuracy of 98.4%. Subsequently, pneumonia cases were further categorized into three distinct types: COVID-19-induced, bacterial, and viral pneumonia, yielding a classification accuracy of 83%. The feature extraction capability of EfficientNet, a state-of-the-art convolutional neural network pre-trained on ImageNet, was leveraged to distill meaningful patterns from the medical images, while Random Forest, known for handling complex, non-linear relationships, served as the classifier in the final decision-making process. Notably, the model effectively differentiated between bacterial and viral pneumonia, achieving an accuracy of 86.91%, despite the overlapping features between these two types. This dual-stage approach (first identifying the presence of pneumonia, followed by specific subtype classification) demonstrates the potential of AI-driven diagnostic tools to assist in more accurate and detailed pneumonia diagnosis.
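To make the dual-stage workflow concrete, the following is a minimal inference sketch and not part of the trained pipeline reported below. It assumes a frozen EfficientNetB0 feature extractor (base_model), a stage-1 Random Forest trained for normal vs. pneumonia (binary_rf), a stage-2 classifier trained on the three pneumonia subtypes (subtype_rf), and a placeholder subtype_names list; any scikit-learn classifier exposing predict_proba would work for stage 2.
import numpy as np

def two_stage_predict(images, base_model, binary_rf, subtype_rf, subtype_names):
    """Hypothetical two-stage inference sketch.
    images: preprocessed batch of shape (N, 224, 224, 3), rescaled to [0, 1].
    Stage 1 decides normal vs pneumonia; stage 2 assigns a subtype."""
    # Shared EfficientNet features, flattened exactly as in the training code
    features = base_model.predict(images)
    features = features.reshape(features.shape[0], -1)

    # Stage 1: 0 = Normal, 1 = Pneumonia (alphabetical class order from the generator)
    stage1 = binary_rf.predict(features)

    results = []
    for i, has_pneumonia in enumerate(stage1):
        if has_pneumonia == 0:
            results.append('Normal')
        else:
            # Stage 2: pick the most probable subtype for this image
            probs = subtype_rf.predict_proba(features[i:i + 1])[0]
            results.append(subtype_names[int(np.argmax(probs))])
    return results

# Example usage (names are placeholders for the models trained below):
# labels = two_stage_predict(batch, base_model, rf_binary, rf_subtypes,
#                            ['COVID-19', 'Pneumonia-Bacteria', 'Pneumonia-Viral'])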
Binary Classification: Normal vs Pneumonia
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, auc, roc_curve, recall_score, f1_score, confusion_matrix, classification_report
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Paths to the train and test directories
train_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/Normal_Pneumonia/train/'
test_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/Normal_Pneumonia/test/'
# ImageDataGenerator to load and preprocess the images
datagen = ImageDataGenerator(rescale=1./255)
# Load the training data
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),  # Use 224x224 for EfficientNet
    batch_size=32,
    class_mode='binary',
    shuffle=False
)
# Load the test data
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',
    shuffle=False
)
# Load EfficientNet model pre-trained on ImageNet without the classification head
base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Freeze the base model layers
base_model.trainable = False
# Function to extract features
def extract_features(generator, model, batch_size):
    # Round up the step count so every sample is covered exactly once
    steps = int(np.ceil(generator.samples / batch_size))
    features = model.predict(generator, steps=steps)
    return features
# Extract features for training and test sets
train_features = extract_features(train_generator, base_model, batch_size=32)
test_features = extract_features(test_generator, base_model, batch_size=32)
# Get the corresponding labels
train_labels = train_generator.classes
test_labels = test_generator.classes
# Reshape features for Random Forest input
train_features_flat = train_features.reshape(train_features.shape[0], -1)
test_features_flat = test_features.reshape(test_features.shape[0], -1)
# Train RandomForest classifier
rf_classifier = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier.fit(train_features_flat, train_labels)
# Make predictions on the test set
y_pred = rf_classifier.predict(test_features_flat)
# Calculate accuracy
accuracy = accuracy_score(test_labels, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")
# Confusion Matrix
conf_matrix = confusion_matrix(test_labels, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=train_generator.class_indices.keys(), yticklabels=train_generator.class_indices.keys())
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix')
plt.show()
# Accuracy, Precision, Recall, F1 Score
precision = precision_score(test_labels, y_pred, average='weighted')
recall = recall_score(test_labels, y_pred, average='weighted')
f1 = f1_score(test_labels, y_pred, average='weighted')
print(f"Precision: {precision * 100:.2f}%")
print(f"Recall: {recall * 100:.2f}%")
print(f"F1 Score: {f1 * 100:.2f}%")
# Classification Report
print(classification_report(test_labels, y_pred, target_names=test_generator.class_indices.keys()))
# Predict probabilities for the test set
y_score = rf_classifier.predict_proba(test_features_flat)
# Compute ROC curve and ROC area for the binary class
fpr, tpr, _ = roc_curve(test_labels, y_score[:, 1]) # Get FPR and TPR for the positive class
roc_auc = auc(fpr, tpr)
# Plot ROC curve
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Normal vs Pneumonia Classification (EfficientNET-RF)')
plt.legend(loc="lower right")
plt.show()
Found 7400 images belonging to 2 classes.
Found 1808 images belonging to 2 classes.
232/232 ━━━━━━━━━━━━━━━━━━━━ 735s 3s/step
57/57 ━━━━━━━━━━━━━━━━━━━━ 168s 3s/step
Accuracy: 98.40%
Precision: 98.54%
Recall: 98.40%
F1 Score: 98.43%

              precision    recall  f1-score   support

      Normal       0.91      1.00      0.95       270
   Pneumonia       1.00      0.98      0.99      1538

    accuracy                           0.98      1808
   macro avg       0.95      0.99      0.97      1808
weighted avg       0.99      0.98      0.98      1808
Binary Classification: Bacterial vs Viral Pneumonia
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, auc, roc_curve, recall_score, f1_score, confusion_matrix, classification_report
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Paths to the train and test directories
train_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/PneumoniaBacteria_PneumoniaViral/train/'
test_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/PneumoniaBacteria_PneumoniaViral/test/'
# ImageDataGenerator to load and preprocess the images
datagen = ImageDataGenerator(rescale=1./255)
# Load the training data
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),  # Use 224x224 for EfficientNet
    batch_size=32,
    class_mode='binary',
    shuffle=False
)
# Load the test data
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',
    shuffle=False
)
# Load EfficientNet model pre-trained on ImageNet without the classification head
base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Freeze the base model layers
base_model.trainable = False
# Function to extract features
def extract_features(generator, model, batch_size):
    # Round up the step count so every sample is covered exactly once
    steps = int(np.ceil(generator.samples / batch_size))
    features = model.predict(generator, steps=steps)
    return features
# Extract features for training and test sets
train_features = extract_features(train_generator, base_model, batch_size=32)
test_features = extract_features(test_generator, base_model, batch_size=32)
# Get the corresponding labels
train_labels = train_generator.classes
test_labels = test_generator.classes
# Reshape features for Random Forest input
train_features_flat = train_features.reshape(train_features.shape[0], -1)
test_features_flat = test_features.reshape(test_features.shape[0], -1)
# Train RandomForest classifier
rf_classifier = RandomForestClassifier(n_estimators=100, random_state=42)
rf_classifier.fit(train_features_flat, train_labels)
# Make predictions on the test set
y_pred = rf_classifier.predict(test_features_flat)
# Calculate accuracy
accuracy = accuracy_score(test_labels, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")
# Confusion Matrix
conf_matrix = confusion_matrix(test_labels, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=train_generator.class_indices.keys(), yticklabels=train_generator.class_indices.keys())
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix')
plt.show()
# Accuracy, Precision, Recall, F1 Score
precision = precision_score(test_labels, y_pred, average='weighted')
recall = recall_score(test_labels, y_pred, average='weighted')
f1 = f1_score(test_labels, y_pred, average='weighted')
print(f"Precision: {precision * 100:.2f}%")
print(f"Recall: {recall * 100:.2f}%")
print(f"F1 Score: {f1 * 100:.2f}%")
# Classification Report
print(classification_report(test_labels, y_pred, target_names=test_generator.class_indices.keys()))
# Predict probabilities for the test set
y_score = rf_classifier.predict_proba(test_features_flat)
# Compute ROC curve and ROC area for the binary class
fpr, tpr, _ = roc_curve(test_labels, y_score[:, 1]) # Get FPR and TPR for the positive class
roc_auc = auc(fpr, tpr)
# Plot ROC curve
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Bacteria vs Viral Classification (EfficientNET-RF)')
plt.legend(loc="lower right")
plt.show()
Found 3328 images belonging to 2 classes.
Found 1329 images belonging to 2 classes.
105/105 ━━━━━━━━━━━━━━━━━━━━ 299s 3s/step
42/42 ━━━━━━━━━━━━━━━━━━━━ 127s 3s/step
Accuracy: 86.91%
Precision: 88.79%
Recall: 86.91%
F1 Score: 87.40%

                    precision    recall  f1-score   support

Pneumonia-Bacteria       0.95      0.87      0.91      1001
   Pneumonia-Viral       0.68      0.88      0.77       328

          accuracy                           0.87      1329
         macro avg       0.82      0.87      0.84      1329
      weighted avg       0.89      0.87      0.87      1329
Multiclass Classification of Pneumonia: COVID-19, Bacterial, and Viral
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, auc, roc_curve, recall_score, f1_score, confusion_matrix, classification_report
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from itertools import cycle
# Paths to the train and test directories
train_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/MultiClassClassification/PneumoniaClassesOnly/train/'
test_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/MultiClassClassification/PneumoniaClassesOnly/test/'
# ImageDataGenerator to load and preprocess the images
datagen = ImageDataGenerator(rescale=1./255)
# Load the training data
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),  # Use 224x224 for EfficientNet
    batch_size=32,
    class_mode='sparse',  # Integer labels for the three pneumonia classes
    shuffle=False
)
# Load the test data
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='sparse',
    shuffle=False
)
# Load EfficientNet model pre-trained on ImageNet without the classification head
base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Freeze the base model layers
base_model.trainable = False
# Function to extract features
def extract_features(generator, model, batch_size):
    # Round up the step count so every sample is covered exactly once
    steps = int(np.ceil(generator.samples / batch_size))
    features = model.predict(generator, steps=steps)
    return features
# Extract features for training and test sets
train_features = extract_features(train_generator, base_model, batch_size=32)
test_features = extract_features(test_generator, base_model, batch_size=32)
# Get the corresponding labels from the generators
train_labels = train_generator.classes
test_labels = test_generator.classes
# Reshape features to 2D array (flattened for Random Forest input)
train_features_flat = train_features.reshape(train_features.shape[0], -1)
test_features_flat = test_features.reshape(test_features.shape[0], -1)
# Binarize labels for multi-class ROC
n_classes = len(train_generator.class_indices)
train_labels_bin = label_binarize(train_labels, classes=np.arange(n_classes))
test_labels_bin = label_binarize(test_labels, classes=np.arange(n_classes))
# Train RandomForest in One-vs-Rest fashion
rf_classifier = OneVsRestClassifier(RandomForestClassifier(n_estimators=100, random_state=42))
rf_classifier.fit(train_features_flat, train_labels_bin)
# Make predictions on the test set
y_pred = rf_classifier.predict(test_features_flat)
# Calculate accuracy using argmax to get the predicted class labels
y_pred = np.argmax(y_pred, axis=1)
test_labels_max = np.argmax(test_labels_bin, axis=1)
accuracy = accuracy_score(test_labels_max, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")
# Confusion Matrix
conf_matrix = confusion_matrix(test_labels, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=train_generator.class_indices.keys(), yticklabels=train_generator.class_indices.keys())
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix')
plt.show()
# Accuracy, Precision, Recall, F1 Score
precision = precision_score(test_labels, y_pred, average='weighted')
recall = recall_score(test_labels, y_pred, average='weighted')
f1 = f1_score(test_labels, y_pred, average='weighted')
print(f"Precision: {precision * 100:.2f}%")
print(f"Recall: {recall * 100:.2f}%")
print(f"F1 Score: {f1 * 100:.2f}%")
# Classification Report
print(classification_report(test_labels, y_pred, target_names=test_generator.class_indices.keys()))
# Predict probabilities for the test set
y_score = rf_classifier.predict_proba(test_features_flat)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(test_labels_bin[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Plot ROC curve for each class
plt.figure()
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'darkred', 'darkgreen'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=2,
             label=f'Class {i} (area = {roc_auc[i]:.2f})')
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Covid-19, Bacteria and Viral Classification (EfficientNET-RF)')
plt.legend(loc="lower right")
plt.show()
Found 4400 images belonging to 3 classes.
Found 1538 images belonging to 3 classes.
138/138 ━━━━━━━━━━━━━━━━━━━━ 418s 3s/step
49/49 ━━━━━━━━━━━━━━━━━━━━ 143s 3s/step
Accuracy: 82.64%
Precision: 86.96%
Recall: 82.64%
F1 Score: 83.48%

                    precision    recall  f1-score   support

          COVID-19       0.57      0.99      0.72       209
Pneumonia-Bacteria       0.97      0.80      0.88      1001
   Pneumonia-Viral       0.76      0.80      0.78       328

          accuracy                           0.83      1538
         macro avg       0.77      0.86      0.79      1538
      weighted avg       0.87      0.83      0.83      1538
EfficientNet Alone
Binary Classification
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Paths to the train and test directories
train_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/Normal_Pneumonia/train/'
test_dir = 'drive/MyDrive/PneumoniaDataset/Curated X-Ray Dataset/BinaryClassification/Normal_Pneumonia/test/'
# ImageDataGenerator for loading and augmenting images
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)
# Load training data
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',  # Binary labels for Normal vs Pneumonia
    subset='training'
)
# Load validation data
val_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',
    subset='validation'
)
# Load EfficientNetB0 model pre-trained on ImageNet without the classification head
base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Add custom classification layers
x = base_model.output
x = GlobalAveragePooling2D()(x) # Pool the spatial feature maps to a single vector per image
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x) # Single sigmoid unit for binary classification
# Define the model
model = Model(inputs=base_model.input, outputs=predictions)
# Freeze the base model layers
for layer in base_model.layers:
    layer.trainable = False
# Compile the model (binary_crossentropy matches the single sigmoid output)
model.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy', metrics=['accuracy'])
# Train the model on the data for a few epochs
history = model.fit(train_generator, validation_data=val_generator, epochs=5)
# Unfreeze some layers of the base model for fine-tuning
for layer in base_model.layers[-10:]:  # Unfreeze the last 10 layers
    layer.trainable = True
# Recompile the model with a lower learning rate for fine-tuning
model.compile(optimizer=Adam(learning_rate=0.00001), loss='binary_crossentropy', metrics=['accuracy'])
# Continue training for fine-tuning
history_finetune = model.fit(train_generator, validation_data=val_generator, epochs=5)
# Load test data
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary',  # Must match the binary training setup
    shuffle=False
)
# Evaluate the model on the test data
test_loss, test_accuracy = model.evaluate(test_generator)
print(f'Test Accuracy: {test_accuracy * 100:.2f}%')
# Predict labels for the test data
y_pred = model.predict(test_generator)
y_pred_classes = (y_pred > 0.5).astype(int).ravel()  # Threshold the sigmoid output at 0.5
# Generate confusion matrix and classification report
print("Confusion Matrix")
cm = confusion_matrix(test_generator.classes, y_pred_classes)
print(cm)
# Plot confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=test_generator.class_indices.keys(), yticklabels=test_generator.class_indices.keys())
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix')
plt.show()
# Classification report
target_names = ['Normal', 'Pneumonia']
print("Classification Report")
print(classification_report(test_generator.classes, y_pred_classes, target_names=target_names))
Found 5920 images belonging to 2 classes.
Found 1480 images belonging to 2 classes.
Downloading data from https://storage.googleapis.com/keras-applications/efficientnetb0_notop.h5
16705208/16705208 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Epoch 1/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 1024s 5s/step - accuracy: 0.4092 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 2/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 702s 4s/step - accuracy: 0.4144 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 3/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 704s 4s/step - accuracy: 0.4139 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 4/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 734s 4s/step - accuracy: 0.4067 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 5/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 711s 4s/step - accuracy: 0.4068 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 1/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 761s 4s/step - accuracy: 0.5663 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 2/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 774s 4s/step - accuracy: 0.3993 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 3/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 713s 4s/step - accuracy: 0.4146 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 4/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 742s 4s/step - accuracy: 0.4121 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Epoch 5/5
185/185 ━━━━━━━━━━━━━━━━━━━━ 713s 4s/step - accuracy: 0.4027 - loss: 0.0000e+00 - val_accuracy: 0.4054 - val_loss: 0.0000e+00
Found 1808 images belonging to 2 classes.
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-1-b40af6a947c6> in <cell line: 79>()
     78 # Evaluate the model on the test data
---> 79 test_loss, test_accuracy = model.evaluate(test_generator)
     80 print(f'Test Accuracy: {test_accuracy * 100:.2f}%')

/usr/local/lib/python3.10/dist-packages/keras/src/utils/traceback_utils.py in error_handler(*args, **kwargs)
--> 122     raise e.with_traceback(filtered_tb) from None

/usr/local/lib/python3.10/dist-packages/keras/src/losses/loss.py in squeeze_or_expand_to_same_rank(x1, x2, expand_rank_1)
---> 90     x1_rank = len(x1.shape)

ValueError: Cannot take the length of shape with unknown rank.