
Parkinson's Disease Detection using Parkinson's Spiral Drawing and CNN

The dataset was taken from this link

Importing the Required Libraries and Packages

In [2]:
import numpy as np
import cv2
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Dropout, Flatten, Dense, MaxPool2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.optimizers import Adam, SGD
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
from tensorflow.keras.regularizers import l2
import tensorflow as tf
import pandas as pd
In [3]:
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/Yuvnish17/Parkinson_disease_detection.zip
!unzip -qo Parkinson_disease_detection.zip
--2021-07-10 06:16:13--  https://cainvas-static.s3.amazonaws.com/media/user_data/Yuvnish17/Parkinson_disease_detection.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.62.120
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.62.120|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 8201907 (7.8M) [application/x-zip-compressed]
Saving to: ‘Parkinson_disease_detection.zip’

Parkinson_disease_d 100%[===================>]   7.82M  --.-KB/s    in 0.05s   

2021-07-10 06:16:13 (165 MB/s) - ‘Parkinson_disease_detection.zip’ saved [8201907/8201907]

Loading the Dataset

In [4]:
data_train = np.load('Parkinson_disease_detection/train_set.npz', allow_pickle=True)
x_train = data_train['arr_0']
y_train = data_train['arr_1']

print(x_train.shape)
print(y_train.shape)
(72, 256, 256, 3)
(72,)
In [5]:
data_test = np.load('Parkinson_disease_detection/test_set.npz', allow_pickle=True)
x_test = data_test['arr_0']
y_test = data_test['arr_1']

print(x_test.shape)
print(y_test.shape)
(30, 256, 256, 3)
(30,)
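
np.savez stores unnamed arrays under the default keys arr_0, arr_1, ..., which is why the images and labels are read back under those names. A quick check (a sketch using the NpzFile objects already loaded above):

print(data_test.files)   # expected: ['arr_0', 'arr_1'] — the images and the string labels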

Data Distribution

In [6]:
unique_train, count = np.unique(y_train, return_counts=True)
plt.figure(figsize=(20, 10))
sns.barplot(x=unique_train, y=count).set_title("Number of training images per category:")
plt.show()
In [7]:
unique_test, count_test = np.unique(y_test, return_counts=True)
plt.figure(figsize=(20, 10))
sns.barplot(x=unique_test, y=count_test).set_title("Number of testing images per category:")
plt.show()

As can be seen, the dataset is balanced but contains very few data points, so we augment it below to increase the number of samples.

Augmenting the Dataset

In [8]:
train_data_generator = ImageDataGenerator(rotation_range=360, 
                                    width_shift_range=0.0, 
                                    height_shift_range=0.0, 
#                                     brightness_range=[0.5, 1.5],
                                    horizontal_flip=True, 
                                    vertical_flip=True)

x = list(x_train)
y = list(y_train)

x_aug_train = []
y_aug_train = []

for (i, v) in enumerate(y):
    x_img = x[i]
    x_img = np.array(x_img)
    x_img = np.expand_dims(x_img, axis=0)
    aug_iter = train_data_generator.flow(x_img, batch_size=1, shuffle=True)
    for j in range(70):
        aug_image = next(aug_iter)[0].astype('uint8')
        x_aug_train.append(aug_image)
        y_aug_train.append(v)
print(len(x_aug_train))
print(len(y_aug_train))

x_train = x + x_aug_train
y_train = y + y_aug_train
print(len(x_train))
print(len(y_train))

test_data_generator = ImageDataGenerator(rotation_range=360, 
                                    width_shift_range=0.0, 
                                    height_shift_range=0.0, 
#                                     brightness_range=[0.5, 1.5],
                                    horizontal_flip=True, 
                                    vertical_flip=True)

x = list(x_test)
y = list(y_test)

x_aug_test = []
y_aug_test = []

for (i, v) in enumerate(y):
    x_img = x[i]
    x_img = np.array(x_img)
    x_img = np.expand_dims(x_img, axis=0)
    aug_iter = test_data_generator.flow(x_img, batch_size=1, shuffle=True)
    for j in range(20):
        aug_image = next(aug_iter)[0].astype('uint8')
        x_aug_test.append(aug_image)
        y_aug_test.append(v)
print(len(x_aug_test))
print(len(y_aug_test))

x_test = x + x_aug_test
y_test = y + y_aug_test
print(len(x_test))
print(len(y_test))
5040
5040
5112
5112
600
600
630
630
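
Each of the 72 training images produces 70 augmented copies (72 × 70 = 5040), which together with the originals gives the 5112 training samples printed above; likewise, each of the 30 test images produces 20 copies (30 × 20 = 600), giving 630 test samples.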

Visualizing the Images in Train and Test Set

Images in Train Set
In [9]:
# print(y_test)
figure1 = plt.figure(figsize=(5, 5))
idx_healthy = [i for (i, v) in enumerate(y_train) if v=='healthy']
img_healthy = x_train[idx_healthy[-1]]
plt.imshow(img_healthy)
plt.title('Spiral Drawing by a Healthy Person')
plt.axis('off')
plt.show()

figure2 = plt.figure(figsize=(5, 5))
idx_parkinson = [i for (i, v) in enumerate(y_train) if v=='parkinson']
img_parkinson = x_train[idx_parkinson[-1]]
plt.imshow(img_parkinson)
plt.title("Spiral Drawing by a Person having Parkinson's Disease")
plt.axis('off')
plt.show()

Images in Test Set

In [10]:
figure1 = plt.figure(figsize=(5, 5))
idx_healthy = [i for (i, v) in enumerate(y_test) if v=='healthy']
img_healthy = x_test[idx_healthy[-1]]
plt.imshow(img_healthy)
plt.title('Spiral Drawing by a Healthy Person')
plt.axis('off')
plt.show()

figure2 = plt.figure(figsize=(5, 5))
idx_parkinson = [i for (i, v) in enumerate(y_test) if v=='parkinson']
img_parkinson = x_test[idx_parkinson[-1]]
plt.imshow(img_parkinson)
plt.title("Spiral Drawing by a Person having Parkinson's Disease")
plt.axis('off')
plt.show()

Preprocessing the Images

In [11]:
for i in range(len(x_train)):
    img = x_train[i]
    img = cv2.resize(img, (128, 128))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x_train[i] = img
    
for i in range(len(x_test)):
    img = x_test[i]
    img = cv2.resize(img, (128, 128))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x_test[i] = img

x_train = np.array(x_train)
x_test = np.array(x_test)

x_train = x_train/255.0
x_test = x_test/255.0

label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)
print(y_train.shape)

label_encoder = LabelEncoder()
y_test = label_encoder.fit_transform(y_test)
print(y_test.shape)
(5112,)
(630,)
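
LabelEncoder assigns integer codes in alphabetical order of the class names, so 'healthy' is encoded as 0 and 'parkinson' as 1. A quick check (a sketch, using the encoder fitted above):

print(label_encoder.classes_)   # expected: ['healthy' 'parkinson'], encoded as 0 and 1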

Data Distribution after Augmentation

In [12]:
unique_train, count = np.unique(y_train, return_counts=True)
plt.figure(figsize=(20, 10))
sns.barplot(x=unique_train, y=count).set_title("Number of training images per category after augmentation:")
plt.show()
In [13]:
unique_test, count_test = np.unique(y_test, return_counts=True)
plt.figure(figsize=(20, 10))
sns.barplot(x=unique_test, y=count_test).set_title("Number of test set images per category after augmentation:")
plt.show()
In [14]:
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)

print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
(5112, 128, 128, 1)
(5112, 2)
(630, 128, 128, 1)
(630, 2)

Defining the Model

In [18]:
def parkinson_disease_detection_model(input_shape=(128, 128, 1)):
    regularizer = tf.keras.regularizers.l2(0.001)
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Conv2D(128, (5, 5), padding='same', strides=(1, 1), name='conv1', activation='relu', 
                     kernel_initializer='glorot_uniform', kernel_regularizer=regularizer))
    model.add(MaxPool2D((9, 9), strides=(3, 3)))

    model.add(Conv2D(64, (5, 5), padding='same', strides=(1, 1), name='conv2', activation='relu', 
                     kernel_initializer='glorot_uniform', kernel_regularizer=regularizer))
    model.add(MaxPool2D((7, 7), strides=(3, 3)))
    
    model.add(Conv2D(32, (3, 3), padding='same', strides=(1, 1), name='conv3', activation='relu', 
                     kernel_initializer='glorot_uniform', kernel_regularizer=regularizer))
    model.add(MaxPool2D((5, 5), strides=(2, 2)))

    model.add(Conv2D(32, (3, 3), padding='same', strides=(1, 1), name='conv4', activation='relu', 
                     kernel_initializer='glorot_uniform', kernel_regularizer=regularizer))
    model.add(MaxPool2D((3, 3), strides=(2, 2)))    
    
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu', kernel_initializer='glorot_uniform', name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax', kernel_initializer='glorot_uniform', name='fc3'))
    
    optimizer = Adam(3.15e-5)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
In [19]:
model = parkinson_disease_detection_model(input_shape=(128, 128, 1))
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 128, 128, 128)     3328      
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 40, 40, 128)       0         
_________________________________________________________________
conv2 (Conv2D)               (None, 40, 40, 64)        204864    
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 12, 12, 64)        0         
_________________________________________________________________
conv3 (Conv2D)               (None, 12, 12, 32)        18464     
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 4, 4, 32)          0         
_________________________________________________________________
conv4 (Conv2D)               (None, 4, 4, 32)          9248      
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 1, 1, 32)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 32)                0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 32)                0         
_________________________________________________________________
fc1 (Dense)                  (None, 64)                2112      
_________________________________________________________________
dropout_3 (Dropout)          (None, 64)                0         
_________________________________________________________________
fc3 (Dense)                  (None, 2)                 130       
=================================================================
Total params: 238,146
Trainable params: 238,146
Non-trainable params: 0
_________________________________________________________________
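
As a sanity check on the summary, a Conv2D layer has kernel_height × kernel_width × input_channels × filters + filters parameters: conv1 has 5 × 5 × 1 × 128 + 128 = 3,328 and conv2 has 5 × 5 × 128 × 64 + 64 = 204,864, matching the table above.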

Training the Model

In [21]:
hist = model.fit(x_train, y_train, batch_size=128, epochs=70, validation_data=(x_test, y_test))
Epoch 1/70
 2/40 [>.............................] - ETA: 5s - loss: 0.8416 - accuracy: 0.5312WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0525s vs `on_train_batch_end` time: 0.0910s). Check your callbacks.
40/40 [==============================] - 6s 151ms/step - loss: 0.8406 - accuracy: 0.5074 - val_loss: 0.8369 - val_accuracy: 0.5000
Epoch 2/70
40/40 [==============================] - 6s 151ms/step - loss: 0.8345 - accuracy: 0.4973 - val_loss: 0.8304 - val_accuracy: 0.5000
Epoch 3/70
40/40 [==============================] - 7s 169ms/step - loss: 0.8280 - accuracy: 0.4963 - val_loss: 0.8245 - val_accuracy: 0.5000
Epoch 4/70
40/40 [==============================] - 11s 268ms/step - loss: 0.8221 - accuracy: 0.5057 - val_loss: 0.8188 - val_accuracy: 0.5000
Epoch 5/70
40/40 [==============================] - 11s 269ms/step - loss: 0.8164 - accuracy: 0.5025 - val_loss: 0.8134 - val_accuracy: 0.5000
Epoch 6/70
40/40 [==============================] - 11s 269ms/step - loss: 0.8113 - accuracy: 0.5121 - val_loss: 0.8085 - val_accuracy: 0.5000
Epoch 7/70
40/40 [==============================] - 11s 269ms/step - loss: 0.8063 - accuracy: 0.5090 - val_loss: 0.8039 - val_accuracy: 0.5032
Epoch 8/70
40/40 [==============================] - 10s 261ms/step - loss: 0.8021 - accuracy: 0.5027 - val_loss: 0.7994 - val_accuracy: 0.6825
Epoch 9/70
40/40 [==============================] - 11s 267ms/step - loss: 0.7975 - accuracy: 0.5182 - val_loss: 0.7950 - val_accuracy: 0.6254
Epoch 10/70
40/40 [==============================] - 11s 269ms/step - loss: 0.7938 - accuracy: 0.5027 - val_loss: 0.7912 - val_accuracy: 0.5175
Epoch 11/70
40/40 [==============================] - 11s 270ms/step - loss: 0.7900 - accuracy: 0.5231 - val_loss: 0.7874 - val_accuracy: 0.6571
Epoch 12/70
40/40 [==============================] - 11s 270ms/step - loss: 0.7864 - accuracy: 0.5153 - val_loss: 0.7836 - val_accuracy: 0.5492
Epoch 13/70
40/40 [==============================] - 11s 270ms/step - loss: 0.7832 - accuracy: 0.5160 - val_loss: 0.7816 - val_accuracy: 0.5000
Epoch 14/70
40/40 [==============================] - 11s 270ms/step - loss: 0.7796 - accuracy: 0.5276 - val_loss: 0.7774 - val_accuracy: 0.6016
Epoch 15/70
40/40 [==============================] - 11s 271ms/step - loss: 0.7762 - accuracy: 0.5186 - val_loss: 0.7741 - val_accuracy: 0.5429
Epoch 16/70
40/40 [==============================] - 11s 271ms/step - loss: 0.7723 - accuracy: 0.5468 - val_loss: 0.7704 - val_accuracy: 0.6603
Epoch 17/70
40/40 [==============================] - 11s 272ms/step - loss: 0.7703 - accuracy: 0.5491 - val_loss: 0.7683 - val_accuracy: 0.5317
Epoch 18/70
40/40 [==============================] - 11s 271ms/step - loss: 0.7655 - accuracy: 0.5603 - val_loss: 0.7629 - val_accuracy: 0.5937
Epoch 19/70
40/40 [==============================] - 11s 273ms/step - loss: 0.7639 - accuracy: 0.5379 - val_loss: 0.7607 - val_accuracy: 0.5698
Epoch 20/70
40/40 [==============================] - 11s 264ms/step - loss: 0.7588 - accuracy: 0.5759 - val_loss: 0.7528 - val_accuracy: 0.7603
Epoch 21/70
40/40 [==============================] - 8s 205ms/step - loss: 0.7553 - accuracy: 0.5739 - val_loss: 0.7516 - val_accuracy: 0.5841
Epoch 22/70
40/40 [==============================] - 11s 272ms/step - loss: 0.7503 - accuracy: 0.5843 - val_loss: 0.7464 - val_accuracy: 0.5968
Epoch 23/70
40/40 [==============================] - 11s 275ms/step - loss: 0.7438 - accuracy: 0.6225 - val_loss: 0.7376 - val_accuracy: 0.6317
Epoch 24/70
40/40 [==============================] - 11s 276ms/step - loss: 0.7378 - accuracy: 0.6205 - val_loss: 0.7281 - val_accuracy: 0.7079
Epoch 25/70
40/40 [==============================] - 11s 277ms/step - loss: 0.7276 - accuracy: 0.6399 - val_loss: 0.7160 - val_accuracy: 0.7952
Epoch 26/70
40/40 [==============================] - 11s 269ms/step - loss: 0.7163 - accuracy: 0.6520 - val_loss: 0.7058 - val_accuracy: 0.6984
Epoch 27/70
40/40 [==============================] - 11s 276ms/step - loss: 0.7084 - accuracy: 0.6577 - val_loss: 0.7111 - val_accuracy: 0.6254
Epoch 28/70
40/40 [==============================] - 11s 276ms/step - loss: 0.6999 - accuracy: 0.6706 - val_loss: 0.6852 - val_accuracy: 0.7063
Epoch 29/70
40/40 [==============================] - 11s 276ms/step - loss: 0.6795 - accuracy: 0.7081 - val_loss: 0.6692 - val_accuracy: 0.7794
Epoch 30/70
40/40 [==============================] - 11s 277ms/step - loss: 0.6743 - accuracy: 0.7027 - val_loss: 0.6598 - val_accuracy: 0.7810
Epoch 31/70
40/40 [==============================] - 11s 276ms/step - loss: 0.6602 - accuracy: 0.7193 - val_loss: 0.6459 - val_accuracy: 0.7032
Epoch 32/70
40/40 [==============================] - 11s 277ms/step - loss: 0.6433 - accuracy: 0.7340 - val_loss: 0.6831 - val_accuracy: 0.6444
Epoch 33/70
40/40 [==============================] - 11s 278ms/step - loss: 0.6348 - accuracy: 0.7445 - val_loss: 0.6210 - val_accuracy: 0.7889
Epoch 34/70
40/40 [==============================] - 11s 277ms/step - loss: 0.6210 - accuracy: 0.7502 - val_loss: 0.6264 - val_accuracy: 0.6730
Epoch 35/70
40/40 [==============================] - 11s 278ms/step - loss: 0.6150 - accuracy: 0.7496 - val_loss: 0.6214 - val_accuracy: 0.6714
Epoch 36/70
40/40 [==============================] - 11s 278ms/step - loss: 0.6052 - accuracy: 0.7563 - val_loss: 0.5906 - val_accuracy: 0.7730
Epoch 37/70
40/40 [==============================] - 11s 277ms/step - loss: 0.5885 - accuracy: 0.7653 - val_loss: 0.5831 - val_accuracy: 0.7571
Epoch 38/70
40/40 [==============================] - 11s 272ms/step - loss: 0.5811 - accuracy: 0.7705 - val_loss: 0.5992 - val_accuracy: 0.6762
Epoch 39/70
40/40 [==============================] - 7s 182ms/step - loss: 0.5697 - accuracy: 0.7799 - val_loss: 0.5748 - val_accuracy: 0.6921
Epoch 40/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5749 - accuracy: 0.7815 - val_loss: 0.5607 - val_accuracy: 0.8206
Epoch 41/70
40/40 [==============================] - 6s 157ms/step - loss: 0.5617 - accuracy: 0.7844 - val_loss: 0.5802 - val_accuracy: 0.6889
Epoch 42/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5501 - accuracy: 0.7878 - val_loss: 0.5558 - val_accuracy: 0.7190
Epoch 43/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5538 - accuracy: 0.7885 - val_loss: 0.5511 - val_accuracy: 0.7175
Epoch 44/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5285 - accuracy: 0.7979 - val_loss: 0.5371 - val_accuracy: 0.7349
Epoch 45/70
40/40 [==============================] - 6s 157ms/step - loss: 0.5315 - accuracy: 0.7960 - val_loss: 0.5226 - val_accuracy: 0.7651
Epoch 46/70
40/40 [==============================] - 6s 157ms/step - loss: 0.5149 - accuracy: 0.8124 - val_loss: 0.5175 - val_accuracy: 0.7857
Epoch 47/70
40/40 [==============================] - 6s 157ms/step - loss: 0.5185 - accuracy: 0.8056 - val_loss: 0.5088 - val_accuracy: 0.7921
Epoch 48/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5069 - accuracy: 0.8132 - val_loss: 0.5037 - val_accuracy: 0.7762
Epoch 49/70
40/40 [==============================] - 6s 158ms/step - loss: 0.5038 - accuracy: 0.8167 - val_loss: 0.5012 - val_accuracy: 0.8079
Epoch 50/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4907 - accuracy: 0.8243 - val_loss: 0.5280 - val_accuracy: 0.7556
Epoch 51/70
40/40 [==============================] - 6s 157ms/step - loss: 0.4948 - accuracy: 0.8232 - val_loss: 0.4934 - val_accuracy: 0.8079
Epoch 52/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4850 - accuracy: 0.8273 - val_loss: 0.4933 - val_accuracy: 0.7921
Epoch 53/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4810 - accuracy: 0.8263 - val_loss: 0.4847 - val_accuracy: 0.8000
Epoch 54/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4728 - accuracy: 0.8353 - val_loss: 0.5053 - val_accuracy: 0.7714
Epoch 55/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4667 - accuracy: 0.8380 - val_loss: 0.4779 - val_accuracy: 0.8000
Epoch 56/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4544 - accuracy: 0.8378 - val_loss: 0.4692 - val_accuracy: 0.8032
Epoch 57/70
40/40 [==============================] - 6s 157ms/step - loss: 0.4543 - accuracy: 0.8435 - val_loss: 0.4754 - val_accuracy: 0.8143
Epoch 58/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4423 - accuracy: 0.8474 - val_loss: 0.4643 - val_accuracy: 0.8000
Epoch 59/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4401 - accuracy: 0.8558 - val_loss: 0.4587 - val_accuracy: 0.8127
Epoch 60/70
40/40 [==============================] - 6s 158ms/step - loss: 0.4335 - accuracy: 0.8500 - val_loss: 0.4535 - val_accuracy: 0.8190
Epoch 61/70
40/40 [==============================] - 6s 159ms/step - loss: 0.4330 - accuracy: 0.8554 - val_loss: 0.4487 - val_accuracy: 0.8238
Epoch 62/70
40/40 [==============================] - 11s 274ms/step - loss: 0.4259 - accuracy: 0.8638 - val_loss: 0.4460 - val_accuracy: 0.8302
Epoch 63/70
40/40 [==============================] - 11s 278ms/step - loss: 0.4163 - accuracy: 0.8633 - val_loss: 0.4472 - val_accuracy: 0.8143
Epoch 64/70
40/40 [==============================] - 11s 278ms/step - loss: 0.4164 - accuracy: 0.8640 - val_loss: 0.4418 - val_accuracy: 0.8270
Epoch 65/70
40/40 [==============================] - 11s 277ms/step - loss: 0.4143 - accuracy: 0.8625 - val_loss: 0.4491 - val_accuracy: 0.8206
Epoch 66/70
40/40 [==============================] - 11s 271ms/step - loss: 0.4023 - accuracy: 0.8715 - val_loss: 0.4425 - val_accuracy: 0.8222
Epoch 67/70
40/40 [==============================] - 11s 279ms/step - loss: 0.3984 - accuracy: 0.8732 - val_loss: 0.4353 - val_accuracy: 0.8238
Epoch 68/70
40/40 [==============================] - 11s 277ms/step - loss: 0.3918 - accuracy: 0.8758 - val_loss: 0.4358 - val_accuracy: 0.8254
Epoch 69/70
40/40 [==============================] - 11s 278ms/step - loss: 0.3961 - accuracy: 0.8748 - val_loss: 0.4244 - val_accuracy: 0.8524
Epoch 70/70
40/40 [==============================] - 11s 277ms/step - loss: 0.3950 - accuracy: 0.8734 - val_loss: 0.4325 - val_accuracy: 0.8270
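
The 40 steps per epoch follow from the batch size: ceil(5112 / 128) = 40 batches of training data per epoch.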

Loss and Accuracy Plot

In [22]:
figure = plt.figure(figsize=(10, 10))
plt.plot(hist.history['accuracy'], label='Train_accuracy')
plt.plot(hist.history['val_accuracy'], label='Test_accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc="upper left")
plt.show()

figure2 = plt.figure(figsize=(10, 10))
plt.plot(hist.history['loss'], label='Train_loss')
plt.plot(hist.history['val_loss'], label='Test_loss')
plt.title('Model Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc="upper left")
plt.show()

Classification Report

In [23]:
ypred = model.predict(x_test)
ypred = np.argmax(ypred, axis=1)
y_test_pred = np.argmax(y_test, axis=1)
print(classification_report(y_test_pred, ypred))
              precision    recall  f1-score   support

           0       0.88      0.75      0.81       315
           1       0.78      0.90      0.84       315

    accuracy                           0.83       630
   macro avg       0.83      0.83      0.83       630
weighted avg       0.83      0.83      0.83       630

Confusion Matrix

In [24]:
matrix = confusion_matrix(y_test_pred, ypred)
df_cm = pd.DataFrame(matrix, index=[0, 1], columns=[0, 1])
figure = plt.figure(figsize=(5, 5))
sns.heatmap(df_cm, annot=True, fmt='d')
Out[24]:
<AxesSubplot:>

Saving the Model

In [25]:
model.save('parkinson_disease_detection.h5')
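
The saved HDF5 file can later be reloaded for inference without redefining the architecture. A minimal sketch, assuming the file is in the working directory:

reloaded_model = tf.keras.models.load_model('parkinson_disease_detection.h5')
reloaded_model.summary()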

Testing the Model on Images

In [26]:
labels = ['Healthy', 'Parkinson']
image_healthy = cv2.imread('Parkinson_disease_detection/test_image_healthy.png')
image_parkinson = cv2.imread('Parkinson_disease_detection/test_image_parkinson.png')

image_healthy = cv2.resize(image_healthy, (128, 128))
image_healthy = cv2.cvtColor(image_healthy, cv2.COLOR_BGR2GRAY)
image_healthy = np.array(image_healthy)/255.0    # match the training preprocessing (scale to [0, 1])
image_healthy = np.expand_dims(image_healthy, axis=0)
image_healthy = np.expand_dims(image_healthy, axis=-1)

image_parkinson = cv2.resize(image_parkinson, (128, 128))
image_parkinson = cv2.cvtColor(image_parkinson, cv2.COLOR_BGR2GRAY)
image_parkinson = np.array(image_parkinson)/255.0    # match the training preprocessing (scale to [0, 1])
image_parkinson = np.expand_dims(image_parkinson, axis=0)
image_parkinson = np.expand_dims(image_parkinson, axis=-1)
In [27]:
ypred_healthy = model.predict(image_healthy)
ypred_parkinson = model.predict(image_parkinson)
In [28]:
figure = plt.figure(figsize=(2, 2))
img_healthy = np.squeeze(image_healthy)  # drop the batch and channel dimensions so imshow gets a 2D array
plt.imshow(img_healthy)
plt.axis('off')
plt.title(f'Prediction by the model: {labels[np.argmax(ypred_healthy[0], axis=0)]}')
plt.show()
In [30]:
figure = plt.figure(figsize=(2, 2))
image_parkinson = np.squeeze(image_parkinson)  # drop the batch and channel dimensions so imshow gets a 2D array
plt.imshow(image_parkinson)
plt.axis('off')
plt.title(f'Prediction by the model: {labels[np.argmax(ypred_parkinson[0], axis=0)]}')
plt.show()
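
The per-image steps above can be wrapped in a small helper for convenience. A minimal sketch (the predict_spiral function and its scaling by 255 are illustrative additions that mirror the training preprocessing, not part of the original notebook):

def predict_spiral(image_path, model, labels=('Healthy', 'Parkinson')):
    # Load a spiral drawing, preprocess it like the training data, and return the predicted label
    img = cv2.imread(image_path)
    img = cv2.resize(img, (128, 128))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img.astype('float32') / 255.0      # same scaling as the training set
    img = img.reshape(1, 128, 128, 1)        # add batch and channel dimensions
    probs = model.predict(img)[0]
    return labels[np.argmax(probs)]

# Example usage:
# print(predict_spiral('Parkinson_disease_detection/test_image_healthy.png', model))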
In [ ]: