Importing Necessary Libraries
In [1]:
!pip install wget
In [2]:
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import tensorflow as tf
import wget
In [3]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
Importing the Dataset
In [4]:
url = "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/FE.zip"
file = wget.download(url)
In [5]:
import zipfile
data = zipfile.ZipFile(file,"r")
data.extractall("FE")
#data.printdir()
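The archive unpacks into a nested FE/FE/FE directory holding the Train and Validation folders used by the paths below. As a quick sanity check (a minimal sketch, not part of the original notebook; the Sad folder name is assumed from the labels used later), the class subfolders can be listed:

print(os.listdir("FE/FE/FE/Train"))        # e.g. ['Happy', 'Sad']
print(os.listdir("FE/FE/FE/Validation"))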
In [6]:
img = image.load_img("FE/FE/FE/Train/Happy/1.jpg")  # use a new name so the keras `image` module is not shadowed
In [7]:
plt.imshow(img)
Out[7]:
In [8]:
cv2.imread("FE/FE/FE/Train/Happy/1.jpg").shape
Out[8]:
Preprocessing Steps
In [9]:
img_width,img_height = 256,256
train_data_dir = "FE/FE/FE/Train/"
validation_data_dir = "FE/FE/FE/Validation/"
batch_size = 20
In [10]:
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
In [11]:
train = ImageDataGenerator(rescale=1./255,
                           featurewise_center=False,
                           featurewise_std_normalization=False,
                           shear_range=0.2,
                           rotation_range=10,
                           width_shift_range=0.1,
                           height_shift_range=0.1,
                           zoom_range=0.1,
                           horizontal_flip=True)
validation = ImageDataGenerator(rescale=1./255)
In [12]:
train_dataset = train.flow_from_directory(train_data_dir, target_size=(img_width, img_height), batch_size=10, class_mode="binary")
validation_dataset = validation.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=5, class_mode="binary")
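As a sanity check (a minimal sketch, not part of the original notebook), one batch can be pulled from the training generator to confirm the image shape and the binary labels:

x_batch, y_batch = next(train_dataset)
print(x_batch.shape)   # should be (10, 256, 256, 3): a batch of rescaled RGB images
print(y_batch[:5])     # binary labels, matching class_indices shown below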
In [13]:
train_dataset.class_indices
Out[13]:
In [14]:
train_dataset.classes
Out[14]:
Model
In [15]:
model = Sequential()
model.add(Conv2D(32,(3,3),input_shape = input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (4,4)))
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (4,4)))
model.add(Dropout(0.7))
model.add(Conv2D(128,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (4,4)))
model.add(Dropout(0.7))
model.add(Flatten())
model.add(Dense(128,activation = "relu"))
model.add(Dense(1, activation = 'sigmoid'))
In [16]:
model.summary()
In [17]:
model.compile(loss = "binary_crossentropy", optimizer = Adam(), metrics = ['accuracy'])
In [18]:
model_fit = model.fit(train_dataset, steps_per_epoch = 9, epochs = 150, validation_data = validation_dataset)  # fit_generator is deprecated; fit accepts generators directly
In [19]:
model.save('final_model.h5')
In [20]:
model = load_model('final_model.h5')
Analysis
In [21]:
print("accuracy at epoch 1:",model_fit.history["accuracy"][0])
print("accuracy at epoch 44",model_fit.history["accuracy"][149])
print("validation accuracy at epoch 1:",model_fit.history["val_accuracy"][0])
print("validation accuracy at epoch 44:",model_fit.history["val_accuracy"][149])
plt.plot(model_fit.history["accuracy"])
plt.plot(model_fit.history["val_accuracy"])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
In [22]:
print(f"loss at epoch 1: {model_fit.history['loss'][0]}")
print(f"loss at epoch 44: {model_fit.history['loss'][149]}")
print(f"validation loss at epoch 1: {model_fit.history['loss'][0]}")
print(f"validation loss at epoch 44: {model_fit.history['loss'][149]}")
plt.plot(model_fit.history['loss'])
plt.plot(model_fit.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
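For a single summary number rather than per-epoch curves, the trained model can also be evaluated directly on the validation generator (a short sketch, not part of the original notebook):

val_loss, val_acc = model.evaluate(validation_dataset)
print("validation loss:", val_loss)
print("validation accuracy:", val_acc)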
Prediction
In [23]:
test_url = "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/Test_FE.zip"
test_file = wget.download(test_url)
In [24]:
data2 = zipfile.ZipFile(test_file,"r")
data2.extractall("Test FE")
In [25]:
from tensorflow.keras.preprocessing import image
In [26]:
img_pred = image.load_img("Test FE/Test FE/converted-1.jpg",target_size = (256,256))
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255.0  # apply the same rescaling used for the training generators
img_pred = np.expand_dims(img_pred,axis = 0)
In [27]:
result = model.predict(img_pred)
print(result)
if result[0][0] >= 0.5:  # sigmoid output close to 1 -> Sad, close to 0 -> Happy
    print("Sad")
else:
    print("Happy")
DeepCC
In [28]:
!deepCC final_model.h5
In [ ]: