In [1]:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
%matplotlib inline
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from pylab import imread,subplot,imshow,show
import cv2
import os
In [2]:
!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/CNN_model_dataset.zip"
!unzip -qo CNN_model_dataset.zip
!rm CNN_model_dataset.zip
In [3]:
# Preview sample lesion images from the train and test splits.
# FIX: the original repeated the same read/subplot pair six times with
# numbered variables (Image1..Image6, path..path_5); replaced with a
# single loop over the sample paths.
fig = plt.figure(figsize=(13, 10))
sample_paths = [
    r'data/train/benign/100.jpg',
    r'data/train/benign/1021.jpg',
    r'data/train/malignant/19.jpg',
    r'data/train/malignant/55.jpg',
    r'data/test/benign/105.jpg',
    r'data/test/benign/259.jpg',
]
for cell_idx, sample_path in enumerate(sample_paths, start=1):
    # cv2.imread(..., 0) loads the file as a single-channel grayscale array.
    sample = cv2.imread(sample_path, 0)
    fig.add_subplot(3, 3, cell_idx)
    plt.imshow(sample)
Out[3]:
Rescaling
In [4]:
# NOTE(review): these ImageDataGenerator objects are never used. The
# variables train/test/val are rebound to directory-path strings in the
# cells below, and the actual input pipeline is built with
# image_dataset_from_directory — so the 1/255 rescaling here is NEVER
# applied; the model trains on raw [0, 255] pixels. Renamed so the
# generators no longer shadow the path variables. If normalization is
# intended, add a tf.keras.layers.Rescaling(1./255) layer to the model.
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
In [5]:
# Directory holding the training images, one sub-folder per class
# (benign/, malignant/).
train='data/train/'
In [6]:
# Training split: 75% of data/train (validation_split=0.25 with
# subset='training'), resized to 224x224 and batched by 35. The fixed
# seed keeps the train/validation partition reproducible.
train_data = tf.keras.preprocessing.image_dataset_from_directory(
    train,
    subset='training',
    validation_split=0.25,
    seed=50,
    image_size=(224, 224),
    batch_size=35,
)
In [7]:
# Validation images come from the same directory; the validation subset
# below selects the complementary 25%.
val='data/train/'
In [8]:
# Validation split: the remaining 25% of data/train. The seed matches
# the training call above so the two subsets do not overlap.
val_data = tf.keras.preprocessing.image_dataset_from_directory(
    val,
    subset='validation',
    validation_split=0.25,
    seed=50,
    image_size=(224, 224),
    batch_size=35,
)
In [9]:
# Directory holding the held-out test images, one sub-folder per class.
test='data/test/'
In [10]:
# Held-out test set: all of data/test, no train/validation split.
test_data = tf.keras.preprocessing.image_dataset_from_directory(
    test,
    seed=50,
    image_size=(224, 224),
    batch_size=35,
)
In [11]:
# Display names for the two classes, index-aligned with the alphabetical
# folder order (benign -> 0, malignant -> 1). NOTE(review): a later cell
# rebinds class_names to a dict with the same mapping.
class_names = ['Benign', 'Malignant']
In [12]:
# Replace the folder-derived names ('benign'/'malignant') with the
# title-cased labels. NOTE(review): test_data is not updated here, so
# test_data.class_names keeps the lower-case folder names.
train_data.class_names = class_names
val_data.class_names = class_names
In [13]:
# Show the first nine images of a single training batch with their
# class labels as titles.
plt.figure(figsize=(14, 14))
for batch_images, batch_labels in train_data.take(1):
    for idx in range(9):
        plt.subplot(3, 3, idx + 1)
        plt.imshow(batch_images[idx].numpy().astype("uint8"))
        plt.title(train_data.class_names[batch_labels[idx]])
        plt.axis("off")
In [14]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,Input
from tensorflow.keras.layers import Dense
Model Creation
In [15]:
# Empty linear stack; the CNN layers are appended with add() below.
model=Sequential()
In [16]:
#CNN Layer
# Six conv/pool stages progressively shrink the 224x224x3 input, then a
# flatten + three-Dense head classifies into 2 classes. The 2-unit
# softmax output pairs with the sparse_categorical_crossentropy loss
# used at compile time. NOTE(review): inputs are raw [0, 255] pixels —
# the rescaling generators defined earlier are never applied to this
# pipeline.
model.add(Conv2D(32,(3,3), activation='relu', input_shape=(224,224,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16,(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Classification head: widths grow 50 -> 100 -> 200 before the 2-way
# softmax (unusual ordering, but as written in the original).
model.add(Flatten())
model.add(Dense(50,activation='relu'))
model.add(Dense(100,activation='relu'))
model.add(Dense(200,activation='relu'))
model.add(Dense(2, activation='softmax'))
In [17]:
# Print per-layer output shapes and parameter counts.
model.summary()
In [18]:
# Integer labels from image_dataset_from_directory + 2-unit softmax
# output -> sparse categorical cross-entropy.
model.compile(loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
In [19]:
# Train for 10 epochs, validating on the held-out 25% split each epoch.
# BUG(fixed): the original did `value = print(new_values)` — print()
# returns None, so steps_per_epoch was silently None. Passing the
# intended samples // batch_size (2637 // 35 = 75) would also be wrong:
# 2637 appears to be the full data/train count (TODO confirm), which
# ignores the 25% validation split and would overshoot the available
# training batches. Omitting steps_per_epoch lets Keras iterate the
# whole train_data dataset each epoch — the behavior the notebook
# actually ran with — now stated explicitly instead of by accident.
history = model.fit(
    train_data,
    epochs=10,
    validation_data=val_data,
)
In [20]:
# Loss/accuracy on the validation split.
model.evaluate(val_data)
Out[20]:
In [21]:
# Loss/accuracy on the held-out test set.
model.evaluate(test_data)
Out[21]:
Model Saving
In [22]:
# Persist the trained model (HDF5 format) for reloading below.
model.save("cnn_model_skin1.h5")
Graph Plots
In [23]:
# Training accuracy per epoch.
train_acc_history = history.history['accuracy']
plt.plot(train_acc_history)
plt.title('Train model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
In [24]:
# Training loss per epoch.
# FIX: the y-axis was mislabeled 'accuracy' on a loss plot.
plt.plot(history.history['loss'])
plt.title('Train Model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
In [25]:
# Validation accuracy per epoch.
val_acc_history = history.history['val_accuracy']
plt.plot(val_acc_history)
plt.title(' Val model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
In [26]:
# Validation loss per epoch.
# FIX: the y-axis was mislabeled 'accuracy' on a loss plot.
plt.plot(history.history['val_loss'])
plt.title(' Val Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
In [27]:
# Overlay of training vs validation loss across epochs.
plt.plot(history.history['loss'], 'g', label='Training loss')
plt.plot(history.history['val_loss'], 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [28]:
# Overlay of training vs validation accuracy across epochs.
# FIX: the y-axis was mislabeled 'Loss' on an accuracy plot.
accuracy_train = history.history['accuracy']
accuracy_val = history.history['val_accuracy']
plt.plot(accuracy_train, 'g', label='Training accuracy')
plt.plot(accuracy_val, 'b', label='Validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
In [29]:
from tensorflow.keras.models import load_model

# Reload the saved model for inference.
# FIX: the original imported load_model but then spelled out
# tf.keras.models.load_model anyway, leaving the import unused.
prediction = load_model("cnn_model_skin1.h5")
Prediction
In [30]:
import glob
import cv2
In [31]:
# Collect the sample images placed in dummy/ for manual prediction.
path = glob.glob("dummy/*.jpg")
print(path)
In [32]:
list=[]
In [33]:
for file in path:
photo=cv2.imread(file)
list.append(photo)
In [34]:
from tensorflow.keras.preprocessing import image
In [35]:
# Display each image in dummy/ and print the class predicted by the
# reloaded model.
dir_path = "dummy/"
for fname in os.listdir(dir_path):
    # FIX: join paths with os.path.join instead of dir_path + '//' + fname.
    # FIX: load at 224x224 — the model's input_shape is (224, 224, 3), so
    # predict would fail on images of any other native size (the original
    # load_img call kept the file's own dimensions).
    sample_img = image.load_img(os.path.join(dir_path, fname),
                                target_size=(224, 224))
    plt.imshow(sample_img)
    plt.show()
    x = image.img_to_array(sample_img)
    x = np.expand_dims(x, axis=0)   # add batch dimension -> (1, 224, 224, 3)
    p = np.argmax(prediction.predict(x))
    if p == 0:
        print("---------------Benign---------------\n\n\n")
    elif p == 1:
        print("--------------Malignant-------------\n\n\n")
In [36]:
# Integer class index -> display name. NOTE(review): this rebinds
# class_names (previously a list) and is not referenced by the cells
# below, which read val_data.class_names instead.
class_names={0:"Benign", 1:"Malignant"}
In [37]:
# Compare true vs predicted labels for six images of one test batch.
# (val_data.class_names supplies the title-cased names; test_data's own
# class_names were never overridden and stay lower-case.)
for batch_images, batch_labels in test_data.take(1):
    for idx in range(6):
        print("True_class:", val_data.class_names[batch_labels[idx]])
        x = np.expand_dims(image.img_to_array(batch_images[idx]), axis=0)
        predicted = np.argmax(model.predict(x))
        if predicted == 0:
            print("Predicted Image: Benign")
        else:
            print("Predicted Image: Malignant")
        print("Predicted class:", predicted)
        print(" ")
DeepCC
In [38]:
!deepCC "cnn_model_skin1.h5"