NOTE: This use case is not intended for resource-constrained devices.
Import required libraries¶
In [1]:
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
import PIL
import cv2
import shutil
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras.models import Model,load_model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping
Load the images folder¶
In [2]:
# Download the Braille character dataset archive from the Cainvas S3 bucket.
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_5.zip
In [3]:
# Extract quietly (-q) and overwrite existing files (-o) so re-running the cell is safe.
!unzip -qo archive_5.zip
In [4]:
# Root of the unzipped dataset: a single flat folder of .jpg samples.
rootdir = Path('Braille Dataset/Braille Dataset')
dir_list = [p for p in rootdir.glob('*.jpg')]
In [5]:
# Number of image files found; the bare expression on the last line
# displays the value via the cell's rich output.
image_count = len(dir_list)
image_count
Out[5]:
Viewing sample images from the dataset¶
In [6]:
# Quick visual sanity check: load and display one raw sample image.
from tensorflow.keras.preprocessing import image

sample_img = image.load_img('Braille Dataset/Braille Dataset/a1.JPG0dim.jpg')
plt.imshow(sample_img)
Out[6]:
Preprocessing the data¶
In [24]:
# Reorganise the flat dataset into one sub-directory per letter so that
# Keras' flow_from_directory can infer class labels from folder names.
# Files are named like 'a1.JPG0dim.jpg', so the first character is the label.
images_dir = Path('./images')
# makedirs(exist_ok=True) is idempotent: the original os.mkdir crashed if
# the cell was re-run after a previous (possibly partial) execution.
os.makedirs(images_dir, exist_ok=True)
for i in range(26):
    letter = chr(ord('a') + i)
    os.makedirs(images_dir / letter, exist_ok=True)
for file in os.listdir(rootdir):
    letter = file[0]
    src = rootdir / file
    dst = images_dir / letter / file
    try:
        shutil.copy(src, dst)
    except OSError as err:
        # Report the failure but keep copying the remaining files; the
        # original bare `except:` hid the actual error.
        print(f'Failed to copy {file}: {err}')
In [25]:
# Light augmentation (small rotations and shears) plus a 20% hold-out
# split; both generators read from the class-per-folder layout built above.
datagen = ImageDataGenerator(
    rotation_range=20,
    shear_range=10,
    validation_split=0.2,
)
train_generator = datagen.flow_from_directory(
    './images/', target_size=(28, 28), subset='training'
)
val_generator = datagen.flow_from_directory(
    './images/', target_size=(28, 28), subset='validation'
)
Model creation¶
In [26]:
# Callbacks: checkpoint the best model, reduce the learning rate when
# validation stalls, and stop early if it stalls for long.
model_ckpt = ModelCheckpoint('BrailleNet.h5', save_best_only=True)
reduce_lr = ReduceLROnPlateau(patience=8, verbose=0)
early_stop = EarlyStopping(patience=15, verbose=1)

# Functional-API CNN: separable convolutions keep the parameter count
# low; global max-pooling collapses the spatial dimensions before a
# small dense head ending in a 26-way softmax (letters a-z).
entry = layers.Input(shape=(28, 28, 3))

net = entry
for filters, kernel in ((64, (3, 3)), (128, (3, 3))):
    net = layers.SeparableConv2D(filters, kernel, activation='relu')(net)
    net = layers.MaxPooling2D((2, 2))(net)
net = layers.SeparableConv2D(256, (2, 2), activation='relu')(net)
net = layers.GlobalMaxPooling2D()(net)

net = layers.Dense(256)(net)
net = layers.LeakyReLU()(net)
net = layers.Dense(64, kernel_regularizer=l2(2e-4))(net)
net = layers.LeakyReLU()(net)
net = layers.Dense(26, activation='softmax')(net)

model = Model(entry, net)
In [27]:
# Layer-by-layer summary with output shapes and parameter counts.
model.summary()
In [28]:
# Categorical cross-entropy matches flow_from_directory's default
# class_mode='categorical' (one-hot) labels.
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
Training the Model¶
In [29]:
# Model.fit accepts generators directly; fit_generator is deprecated
# and removed in recent TensorFlow 2.x releases, with identical
# behaviour here. Early stopping / LR reduction key off val_loss.
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=50,
    callbacks=[model_ckpt, reduce_lr, early_stop],
    verbose=1,
)
Visualize accuracy and loss to check whether the model is overfitting¶
In [30]:
sns.set()

# Per-epoch curves recorded by model.fit.
hist = history.history
epochs = range(1, len(hist['loss']) + 1)

# Accuracy: training vs. validation on one set of axes.
fig_acc, ax_acc = plt.subplots()
ax_acc.plot(epochs, hist['accuracy'], color='green', label='Training Accuracy')
ax_acc.plot(epochs, hist['val_accuracy'], color='blue', label='Validation Accuracy')
ax_acc.set_title('Training and Validation Accuracy')
ax_acc.set_ylabel('Accuracy')
ax_acc.set_xlabel('Epoch')
ax_acc.legend()

# Loss: a widening train/validation gap would indicate overfitting.
fig_loss, ax_loss = plt.subplots()
ax_loss.plot(epochs, hist['loss'], color='green', label='Training Loss')
ax_loss.plot(epochs, hist['val_loss'], color='red', label='Validation Loss')
ax_loss.set_title('Training and Validation Loss')
ax_loss.set_xlabel('Epoch')
ax_loss.set_ylabel('Loss')
ax_loss.legend()

plt.show()
In [31]:
# Inference on the validation generator; each output row is a 26-way
# softmax distribution (final Dense(26, activation='softmax') layer).
pred= model.predict(val_generator)
pred
Out[31]:
In [32]:
# Save the final (last-epoch) model; ModelCheckpoint has separately kept
# the best-performing weights in 'BrailleNet.h5'.
model.save('braille.h5')
In [33]:
# NOTE(review): deepCC appears to compile the saved model for deployment
# (Cainvas tooling) — confirm target/flags before relying on the output.
!deepCC braille.h5
In [ ]: