# braintumor.py
import numpy as np
import pandas as pd
import silence_tensorflow.auto  # importing this module suppresses TensorFlow log spam
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.preprocessing import image_dataset_from_directory
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import os
import datetime
warnings.filterwarnings('ignore')
if not os.path.exists('model_checkpoints'):
    os.makedirs('model_checkpoints')
train_dir = 'Training'
test_dir = 'Testing'
# Image params
batch_size = 32
img_height = 300
img_width = 300
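# Note: 300x300 is larger than the resolution EfficientNetV2B1 is commonly
# trained at (around 240x240), which the architecture tolerates; larger inputs
# trade speed for detail.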
# Create a training dataset
train_dataset = image_dataset_from_directory(
    train_dir,
    validation_split=0.2,
    subset="training",
    seed=13,
    image_size=(img_height, img_width),
    batch_size=batch_size
)
# Create a validation dataset
validation_dataset = image_dataset_from_directory(
    train_dir,
    validation_split=0.2,
    subset="validation",
    seed=13,  # must match the training seed so the two subsets do not overlap
    image_size=(img_height, img_width),
    batch_size=batch_size
)
# Create a testing dataset
test_dataset = image_dataset_from_directory(
    test_dir,
    shuffle=False,  # keep file order fixed so predictions align with the labels gathered below
    image_size=(img_height, img_width),
    batch_size=batch_size
)
# Get list of class names
class_names = train_dataset.class_names
# Prepare datasets
train_dataset = train_dataset.cache().shuffle(1024).prefetch(buffer_size=tf.data.AUTOTUNE)
validation_dataset = validation_dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
test_dataset = test_dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
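# cache() keeps decoded images in memory after the first pass, shuffle(1024)
# re-shuffles the training data each epoch, and prefetch() overlaps input
# preparation with model execution.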
# Display sample images
plt.figure(figsize=(15, 10))
for images, labels in train_dataset.take(1):
    for i in range(12):
        plt.subplot(3, 4, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
plt.show()
# Load the base model and add custom layers
base_model = tf.keras.applications.EfficientNetV2B1(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
base_model.trainable = False
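# Fine-tuning scheme: the blanket freeze above is then overridden per layer, so
# layers up to index 251 keep their frozen ImageNet weights while the remaining
# top layers are trained. The split at index 251 is this script's choice.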
for layer in base_model.layers[:251]:
    layer.trainable = False
for layer in base_model.layers[251:]:
    layer.trainable = True
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.3)(x)
x = Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01))(x)
# Create predictions layer
predictions = Dense(4, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(0.01))(x)
# Creating a new model
model = Model(inputs=base_model.input, outputs=predictions)
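# A small learning rate (1e-5) is standard for fine-tuning, so updates do not
# destroy the pretrained features; sparse categorical cross-entropy matches the
# integer labels that image_dataset_from_directory produces by default.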
model.compile(optimizer=tf.keras.optimizers.Adam(1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Load the weights of the model you want to continue training with
weights_path = 'model_checkpoints/best_model.h5'
if os.path.exists(weights_path):
    model.load_weights(weights_path)
else:
    print(f"No weights file found at {weights_path}, training from scratch.")
best_model_path = 'model_checkpoints/best_model.h5'
# Setting up checkpoints callbacks
checkpoint_callback = ModelCheckpoint(
    filepath='model_checkpoints/model-{epoch:02d}-{val_loss:.2f}.h5',
    monitor='val_loss',
    save_freq='epoch',
    save_best_only=True,
    save_weights_only=False,
    mode='min',
    verbose=1
)
early_stopping = EarlyStopping(
    monitor='val_loss',
    patience=5,
    verbose=1,
    mode='min',
    restore_best_weights=True
)
# Setting up learning rate reduction callback
reduce_lr = ReduceLROnPlateau(
    monitor='val_loss',
    factor=0.5,
    patience=5,
    min_lr=1e-6
)
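# Note: with patience=5 on both EarlyStopping and ReduceLROnPlateau, training
# may stop at the same epoch the first LR reduction fires; a smaller patience
# here (e.g. 3) would let the reduced rate take effect before stopping.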
# Number of training epochs
NUM_EPOCHS = 10
# Train the model and save the history
history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    epochs=NUM_EPOCHS,
    verbose=1,
    callbacks=[checkpoint_callback, early_stopping, reduce_lr]
)
# After the training, save the best model separately
model.save(best_model_path)
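# Because EarlyStopping was created with restore_best_weights=True, the model
# saved here carries the weights from the best validation-loss epoch.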
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
if not os.path.exists('plots'):
    os.makedirs('plots')
# Plot the training history
history_df = pd.DataFrame(history.history)
plt.figure(figsize=(14, 7))
sns.lineplot(data=history_df[['accuracy', 'val_accuracy']], markers=True)
plt.title('Accuracy Plot')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.savefig(f"plots/Accuracy_Plot_{now}.png")
plt.show()
# Plot the training history again but with loss and val_loss
plt.figure(figsize=(14, 7))
sns.lineplot(data=history_df[['loss', 'val_loss']], markers=True)
plt.title('Loss Plot')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.savefig(f"plots/Loss_Plot_{now}.png")
plt.show()
# Evaluate the model
print("Testing Metrics are as Follows: ")
model.evaluate(test_dataset, return_dict = True)
# Create a confusion matrix
predictions = np.argmax(model.predict(test_dataset), axis=1)
true_labels = []
for images, labels in test_dataset:
    true_labels.extend(labels.numpy())
conf_matrix = confusion_matrix(true_labels, predictions)
conf_df = pd.DataFrame(conf_matrix, index=class_names, columns=class_names)
# Plot confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(conf_df, annot=True, fmt='d', cmap='Blues')
plt.title('Confusion Matrix')
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.savefig(f"plots/Confusion_Matrix_{now}.png")
plt.show()
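# A minimal single-image inference sketch; 'sample_image.jpg' is a hypothetical
# path used only for illustration. It reuses the trained model, the image size,
# and class_names from above.
sample_path = 'sample_image.jpg'  # hypothetical example path
if os.path.exists(sample_path):
    img = tf.keras.utils.load_img(sample_path, target_size=(img_height, img_width))
    img_array = tf.keras.utils.img_to_array(img)  # HWC float array in [0, 255]
    img_array = tf.expand_dims(img_array, 0)      # add a batch dimension
    pred = model.predict(img_array)
    print(f"Predicted class: {class_names[np.argmax(pred[0])]}")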