Downloading Dataset
In [1]:
!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Alz_data.zip"
!unzip -qo Alz_data.zip
!rm Alz_data.zip
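To confirm the archive unpacked as expected before building the datasets, a quick listing of the extracted folders can help. This is a minimal sketch, assuming the zip extracts to the 'Alzheimer_s Dataset' directory used in the cells below.
import os

# Sanity check (assumes the archive extracts to 'Alzheimer_s Dataset' with train/ and test/ subfolders)
for split in ("train", "test"):
    split_path = os.path.join("Alzheimer_s Dataset", split)
    if os.path.isdir(split_path):
        print(split, "->", sorted(os.listdir(split_path)))
    else:
        print(split_path, "not found")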
Importing Necessary Libraries
In [2]:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
%matplotlib inline
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from pylab import imread,subplot,imshow,show
import cv2
import os
Rescaling
In [3]:
# ImageDataGenerator objects configured to rescale pixel values to [0, 1].
# Note: the datasets below are built with image_dataset_from_directory, which does not use
# these generators, so this rescaling is not applied to them automatically
# (see the sketch after the test-data cell below for one way to apply it).
train = ImageDataGenerator(rescale=1./255)
test = ImageDataGenerator(rescale=1./255)
val = ImageDataGenerator(rescale=1./255)
In [4]:
train_dir = 'Alzheimer_s Dataset/train/'
In [5]:
train_data = tf.keras.preprocessing.image_dataset_from_directory(
    train_dir,
    validation_split=0.2,
    image_size=(224, 224),
    batch_size=32,
    subset='training',
    seed=1000
)
In [6]:
val_dir = 'Alzheimer_s Dataset/train/'
In [7]:
val_data = tf.keras.preprocessing.image_dataset_from_directory(
    val_dir,
    validation_split=0.2,
    image_size=(224, 224),
    batch_size=32,
    subset='validation',
    seed=1000
)
In [8]:
test_dir = 'Alzheimer_s Dataset/test/'
In [9]:
test_data = tf.keras.preprocessing.image_dataset_from_directory(
    test_dir,
    image_size=(224, 224),
    batch_size=32,
    seed=1000
)
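Since image_dataset_from_directory yields raw pixel values in [0, 255], the 1/255 rescaling generators defined earlier are not applied to train_data, val_data, or test_data. One way to apply the same scaling to these tf.data pipelines, shown here only as a sketch rather than part of the recorded run, is to map a Rescaling layer over each dataset (depending on the TensorFlow version, the layer may instead live at tf.keras.layers.experimental.preprocessing.Rescaling):
# Sketch only: scale pixels to [0, 1] for the tf.data pipelines (not used in the run recorded below)
rescale = tf.keras.layers.Rescaling(1./255)

def apply_rescale(images, labels):
    # Images arrive as float32 batches of shape (batch, 224, 224, 3) with values in [0, 255]
    return rescale(images), labels

# train_data = train_data.map(apply_rescale)
# val_data = val_data.map(apply_rescale)
# test_data = test_data.map(apply_rescale)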
In [10]:
class_names = ['MildDementia', 'ModerateDementia', 'NonDementia', 'VeryMildDementia']
In [11]:
train_data.class_names = class_names
val_data.class_names = class_names
In [12]:
print(val_data)
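Beyond printing the dataset object, a few quick checks make it easier to verify what was loaded: the class names attached above and the number of batches in each split. A small sketch:
# Quick inspection of the loaded splits
print("Classes:", train_data.class_names)
print("Training batches:", tf.data.experimental.cardinality(train_data).numpy())
print("Validation batches:", tf.data.experimental.cardinality(val_data).numpy())
print("Test batches:", tf.data.experimental.cardinality(test_data).numpy())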
In [13]:
plt.figure(figsize=(10, 10))
for images, labels in train_data.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(train_data.class_names[labels[i]])
        plt.axis("off")
In [14]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Input
In [15]:
model=Sequential()
In [16]:
# Convolutional feature extractor
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
# Fully connected classifier head
model.add(Dense(10, activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(133, activation='relu'))
# 4 output classes with softmax probabilities
model.add(Dense(4, activation='softmax'))
In [17]:
model.summary()
In [18]:
model.compile(optimizer = tf.keras.optimizers.Adam(1e-4), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
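sparse_categorical_crossentropy expects integer class labels, which is what image_dataset_from_directory provides by default (label_mode='int'); one-hot labels would instead require categorical_crossentropy. A quick check on one batch, as a sketch:
# Confirm the label format matches the sparse loss: integer class indices, one per image
for images, labels in train_data.take(1):
    print("Image batch shape:", images.shape)   # (batch, 224, 224, 3)
    print("Label batch shape:", labels.shape)   # (batch,)
    print("Label dtype:", labels.dtype)         # int32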
In [19]:
history = model.fit(train_data, validation_data=val_data, epochs=10)
Model Saving
In [20]:
model.save("alz_model1.h5")
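To confirm the saved HDF5 file can be restored for later inference, the model can be reloaded with load_model. A minimal sketch:
# Sketch: reload the saved model and check it matches the in-memory one
reloaded = tf.keras.models.load_model("alz_model1.h5")
reloaded.summary()
# reloaded.evaluate(val_data)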
In [21]:
model.evaluate(val_data)
Graph Plotting
In [22]:
plt.plot(history.history['accuracy'])
plt.title('Train Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
In [23]:
plt.plot(history.history['loss'])
plt.title('Train Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
In [24]:
plt.plot(history.history['val_accuracy'])
plt.title('Val Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
In [25]:
plt.plot(history.history['val_loss'])
plt.title('Val Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
In [26]:
loss_train = history.history['loss']
loss_val = history.history['val_loss']
plt.plot(loss_train, 'g', label='Training loss')
plt.plot(loss_val, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [27]:
accuracy_train = history.history['accuracy']
accuracy_val = history.history['val_accuracy']
plt.plot(accuracy_train, 'g', label='Training accuracy')
plt.plot(accuracy_val, 'b', label='Validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
Prediction
In [28]:
class_names={0:"MildDementia", 1:"ModerateDementia", 2:"NonDementia", 3:"VeryMildDementia"}
In [29]:
for images, labels in val_data.take(1):
    for i in range(6):
        print("True_class:", val_data.class_names[labels[i]])
        x = image.img_to_array(images[i])
        x = np.expand_dims(x, axis=0)
        p = np.argmax(model.predict(x))
        if p == 0:
            print("Predicted Image: Mild Dementia")
        elif p == 1:
            print("Predicted Image: Moderate Dementia")
        elif p == 2:
            print("Predicted Image: Non Dementia")
        else:
            print("Predicted Image: Very Mild Dementia")
        print("Predicted class:", p)
print("All the predicted images are correct!!!!!")
deepCC
In [30]:
!deepCC alz_model1.h5