This is my model's architecture:
def generate_network_2(lr=3e-3, drop=0.4):
    """Build and compile a small CNN for 3-class image classification.

    Architecture: a strided 5x5 conv stem (64 filters), then two stages of
    paired 3x3 convs (128, then 256 filters), each conv followed by batch
    normalization and each stage closed by 2x2 max-pooling, then a dense
    head (64 -> 16) with dropout and a 3-way softmax output. Input images
    are 312x312 RGB.

    Args:
        lr: learning rate for the Adam optimizer.
        drop: dropout rate applied after each hidden dense layer.

    Returns:
        A compiled ``keras.models.Sequential`` model (categorical
        cross-entropy loss, accuracy metric).
    """
    model = keras.models.Sequential()
    # Stem: strided conv halves the spatial resolution up front.
    model.add(keras.layers.Conv2D(64, kernel_size=5, strides=2, activation='relu',
                                  padding="same", input_shape=(312, 312, 3)))
    # Fix: keras.layers.normalization.BatchNormalization is a private module
    # path that was removed in TF 2.x; use the public keras.layers alias.
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((2, 2), padding="same"))
    # NOTE(review): batch norm both before and after each pooling layer is
    # unusual and likely redundant; kept to preserve the original topology.
    model.add(keras.layers.BatchNormalization())
    # Stage 2: two 3x3 convs at 128 filters.
    model.add(keras.layers.Conv2D(128, kernel_size=3, activation="relu", padding="same"))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(128, kernel_size=3, activation="relu", padding="same"))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((2, 2), padding="same"))
    model.add(keras.layers.BatchNormalization())
    # Stage 3: two 3x3 convs at 256 filters.
    model.add(keras.layers.Conv2D(256, kernel_size=3, activation="relu", padding="same"))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Conv2D(256, kernel_size=3, activation="relu", padding="same"))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D((2, 2), padding="same"))
    model.add(keras.layers.BatchNormalization())
    # Classifier head.
    model.add(keras.layers.Flatten())
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(64, activation="relu"))
    model.add(keras.layers.Dropout(drop))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(16, activation="relu"))
    model.add(keras.layers.Dropout(drop))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dense(3, activation="softmax"))
    optimizer = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
When I run model.evaluate on the validation dataset, I get 69% accuracy. However, when I manually calculate the accuracy and the confusion matrix using model.predict, I get only ~31%.
model.evaluate(valid_gen)
4/4 [==============================] - 2s 506ms/step - loss: 0.7732 - accuracy: 0.6917
[0.773181676864624, 0.6916666626930237]
def print_cm(cm, labels):
    """Pretty-print a confusion matrix with right-aligned label columns.

    Args:
        cm: 2D array-like supporting ``cm[i, j]`` indexing (e.g. a numpy
            array from sklearn's ``confusion_matrix``).
        labels: class names; column width is the longest label's length.
    """
    width = max(len(name) for name in labels)
    # Header: blank corner cell, then one right-aligned column per label.
    print(" " * width, end="")
    for name in labels:
        print(f"{name:>{width}}", end="")
    print()
    # Body: each row starts with its label, followed by the counts.
    for row, name in enumerate(labels):
        print(f"{name:>{width}}", end="")
        for col in range(len(labels)):
            print(f"{cm[row, col]:{width}d}", end="")
        print()
# NOTE(review): this is the likely cause of the 69% (evaluate) vs ~31%
# (manual) discrepancy. If valid_data was created with shuffle=True (the
# flow_from_directory default), model.predict consumes batches in shuffled
# order while .classes stays in the original file order, so `pred` and
# `actual` are misaligned and accuracy collapses to chance (~1/3 for 3
# classes). Recreate the generator with shuffle=False before predicting —
# TODO confirm how valid_data is constructed.
# Also note: evaluate above was called on `valid_gen` but predict here uses
# `valid_data` — presumably the same generator; verify they match.
# Take the argmax over class probabilities to get predicted class indices.
pred = np.argmax(model.predict(valid_data), axis=-1)
# Ground-truth class indices in the generator's original (unshuffled) order.
actual = valid_data.classes
cm = confusion_matrix(actual,pred)
print('Confusion Matrix')
print_cm(cm, ["0", "1", "2"])
Confusion Matrix
0 1 2
0 9 12 15
1 8 16 14
2 12 19 15
Classification Report
precision recall f1-score support
0 0.31 0.25 0.28 36
1 0.34 0.42 0.38 38
2 0.34 0.33 0.33 46
accuracy 0.33 120
macro avg 0.33 0.33 0.33 120
weighted avg 0.33 0.33 0.33 120