Cainvas

Braille Character Detection

Credit: AITS Cainvas Community

Photo by VAGO on Dribbble

Import required libraries

In [1]:
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from sklearn.preprocessing import LabelEncoder
import PIL
import cv2
import shutil
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras.models import Model,load_model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping

Download and extract the dataset

In [2]:
!wget https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_5.zip
--2021-10-26 04:34:22--  https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/archive_5.zip
Resolving cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)... 52.219.66.16
Connecting to cainvas-static.s3.amazonaws.com (cainvas-static.s3.amazonaws.com)|52.219.66.16|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1361006 (1.3M) [application/x-zip-compressed]
Saving to: ‘archive_5.zip.1’

archive_5.zip.1     100%[===================>]   1.30M  --.-KB/s    in 0.02s   

2021-10-26 04:34:22 (55.2 MB/s) - ‘archive_5.zip.1’ saved [1361006/1361006]

In [3]:
!unzip -qo archive_5.zip
In [4]:
rootdir = Path('Braille Dataset/Braille Dataset')
dir_list = list(rootdir.glob('*.jpg'))
In [5]:
image_count = len(dir_list)
image_count
Out[5]:
1560
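
The filenames encode their labels: the first character of each name is the letter the image shows (the preprocessing step below relies on this). As a quick sanity check, we can count images per letter; a minimal sketch:

from collections import Counter

# Count images per letter using the first character of each filename;
# 1560 images / 26 letters = 60 each if the dataset is balanced
letter_counts = Counter(p.name[0] for p in dir_list)
print(sorted(letter_counts.items()))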

Viewing sample images from the dataset

In [6]:
from tensorflow.keras.preprocessing import image

# Display one image to get a feel for the data
img = image.load_img('Braille Dataset/Braille Dataset/a1.JPG0dim.jpg')
plt.imshow(img)
Out[6]:
<matplotlib.image.AxesImage at 0x7f752de96f60>
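
To view one example of every class, here is a quick sketch that picks the first file per letter, assuming each letter has at least one image (which the class counts above confirm):

import string

fig, axes = plt.subplots(2, 13, figsize=(13, 3))
for ax, letter in zip(axes.flat, string.ascii_lowercase):
    # First file whose name starts with this letter
    sample = next(p for p in sorted(dir_list) if p.name.startswith(letter))
    ax.imshow(image.load_img(sample))
    ax.set_title(letter)
    ax.axis('off')
plt.tight_layout()
plt.show()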

Preprocessing the data

In [24]:
os.mkdir('./images/')

# Create one sub-directory per letter: images/a ... images/z
for i in range(26):
    os.mkdir('./images/' + chr(ord('a') + i))

# The first character of each filename is its letter label,
# so copy every image into the matching class directory
for file in os.listdir(rootdir):
    letter = file[0]
    src = Path('Braille Dataset/Braille Dataset/' + file)
    dst = Path('./images/' + letter + '/' + file)
    try:
        shutil.copy(src, dst)
    except OSError:
        print(file)
In [25]:
# Augment with small rotations and shears; hold out 20% for validation
datagen = ImageDataGenerator(rotation_range=20,
                             shear_range=10,
                             validation_split=0.2)

train_generator = datagen.flow_from_directory('./images/',
                                              target_size=(28,28),
                                              subset='training')

val_generator = datagen.flow_from_directory('./images/',
                                            target_size=(28,28),
                                            subset='validation')
Found 1248 images belonging to 26 classes.
Found 312 images belonging to 26 classes.
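
Before building the model, it is worth confirming what the generators yield: flow_from_directory produces batches of images with one-hot labels. A minimal inspection sketch:

images, labels = next(train_generator)
print(images.shape)                   # (32, 28, 28, 3): default batch_size is 32
print(labels.shape)                   # (32, 26): one-hot over the 26 letters
print(train_generator.class_indices)  # letter -> class index mapping
# Note: pixel values are still in 0-255 here, since no rescale was applied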

Model creation

In [26]:
# Callbacks: keep the best checkpoint, shrink the LR on plateaus, stop early
model_ckpt = ModelCheckpoint('BrailleNet.h5', save_best_only=True)
reduce_lr = ReduceLROnPlateau(patience=8, verbose=0)
early_stop = EarlyStopping(patience=15, verbose=1)

# Small separable-convolution network for 28x28 RGB inputs
entry = layers.Input(shape=(28,28,3))
x = layers.SeparableConv2D(64, (3,3), activation='relu')(entry)
x = layers.MaxPooling2D((2,2))(x)
x = layers.SeparableConv2D(128, (3,3), activation='relu')(x)
x = layers.MaxPooling2D((2,2))(x)
x = layers.SeparableConv2D(256, (2,2), activation='relu')(x)
x = layers.GlobalMaxPooling2D()(x)
x = layers.Dense(256)(x)
x = layers.LeakyReLU()(x)
x = layers.Dense(64, kernel_regularizer=l2(2e-4))(x)
x = layers.LeakyReLU()(x)
x = layers.Dense(26, activation='softmax')(x)

model = Model(entry, x)
In [27]:
model.summary()
Model: "functional_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         [(None, 28, 28, 3)]       0         
_________________________________________________________________
separable_conv2d_3 (Separabl (None, 26, 26, 64)        283       
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 64)        0         
_________________________________________________________________
separable_conv2d_4 (Separabl (None, 11, 11, 128)       8896      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 5, 5, 128)         0         
_________________________________________________________________
separable_conv2d_5 (Separabl (None, 4, 4, 256)         33536     
_________________________________________________________________
global_max_pooling2d_1 (Glob (None, 256)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 256)               65792     
_________________________________________________________________
leaky_re_lu_2 (LeakyReLU)    (None, 256)               0         
_________________________________________________________________
dense_4 (Dense)              (None, 64)                16448     
_________________________________________________________________
leaky_re_lu_3 (LeakyReLU)    (None, 64)                0         
_________________________________________________________________
dense_5 (Dense)              (None, 26)                1690      
=================================================================
Total params: 126,645
Trainable params: 126,645
Non-trainable params: 0
_________________________________________________________________
In [28]:
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

Training the model

In [29]:
# model.fit accepts generators directly; fit_generator is deprecated in TF 2.x
history = model.fit(train_generator,
                    validation_data=val_generator,
                    epochs=50,
                    callbacks=[model_ckpt, reduce_lr, early_stop],
                    verbose=1)
Epoch 1/50
39/39 [==============================] - 1s 25ms/step - loss: 3.2879 - accuracy: 0.0304 - val_loss: 3.2565 - val_accuracy: 0.0513
Epoch 2/50
39/39 [==============================] - 1s 20ms/step - loss: 3.2008 - accuracy: 0.0737 - val_loss: 3.0424 - val_accuracy: 0.1442
Epoch 3/50
39/39 [==============================] - 1s 20ms/step - loss: 2.8271 - accuracy: 0.1907 - val_loss: 2.5630 - val_accuracy: 0.2500
Epoch 4/50
39/39 [==============================] - 1s 20ms/step - loss: 2.3139 - accuracy: 0.3237 - val_loss: 1.9339 - val_accuracy: 0.4423
Epoch 5/50
39/39 [==============================] - 1s 20ms/step - loss: 1.8247 - accuracy: 0.4800 - val_loss: 1.8538 - val_accuracy: 0.4263
Epoch 6/50
39/39 [==============================] - 1s 20ms/step - loss: 1.5288 - accuracy: 0.5697 - val_loss: 1.5570 - val_accuracy: 0.5288
Epoch 7/50
39/39 [==============================] - 1s 20ms/step - loss: 1.3108 - accuracy: 0.6130 - val_loss: 1.2154 - val_accuracy: 0.6442
Epoch 8/50
39/39 [==============================] - 1s 20ms/step - loss: 1.0555 - accuracy: 0.6987 - val_loss: 1.0265 - val_accuracy: 0.7083
Epoch 9/50
39/39 [==============================] - 1s 20ms/step - loss: 0.8850 - accuracy: 0.7476 - val_loss: 0.8268 - val_accuracy: 0.7404
Epoch 10/50
39/39 [==============================] - 1s 20ms/step - loss: 0.7598 - accuracy: 0.7740 - val_loss: 0.8108 - val_accuracy: 0.7885
Epoch 11/50
39/39 [==============================] - 1s 20ms/step - loss: 0.7251 - accuracy: 0.7957 - val_loss: 0.7873 - val_accuracy: 0.7917
Epoch 12/50
39/39 [==============================] - 1s 20ms/step - loss: 0.6806 - accuracy: 0.7981 - val_loss: 0.5830 - val_accuracy: 0.8462
Epoch 13/50
39/39 [==============================] - 1s 20ms/step - loss: 0.5604 - accuracy: 0.8405 - val_loss: 0.6155 - val_accuracy: 0.8429
Epoch 14/50
39/39 [==============================] - 1s 19ms/step - loss: 0.5211 - accuracy: 0.8526 - val_loss: 0.6768 - val_accuracy: 0.8205
Epoch 15/50
39/39 [==============================] - 1s 20ms/step - loss: 0.5544 - accuracy: 0.8429 - val_loss: 0.6641 - val_accuracy: 0.8013
Epoch 16/50
39/39 [==============================] - 1s 20ms/step - loss: 0.4686 - accuracy: 0.8654 - val_loss: 0.5101 - val_accuracy: 0.8622
Epoch 17/50
39/39 [==============================] - 1s 19ms/step - loss: 0.4450 - accuracy: 0.8798 - val_loss: 0.5690 - val_accuracy: 0.8526
Epoch 18/50
39/39 [==============================] - 1s 20ms/step - loss: 0.4367 - accuracy: 0.8686 - val_loss: 0.5395 - val_accuracy: 0.8558
Epoch 19/50
39/39 [==============================] - 1s 19ms/step - loss: 0.5186 - accuracy: 0.8470 - val_loss: 1.2789 - val_accuracy: 0.6250
Epoch 20/50
39/39 [==============================] - 1s 19ms/step - loss: 0.4931 - accuracy: 0.8574 - val_loss: 0.5722 - val_accuracy: 0.8397
Epoch 21/50
39/39 [==============================] - 1s 20ms/step - loss: 0.3798 - accuracy: 0.8862 - val_loss: 0.4391 - val_accuracy: 0.8590
Epoch 22/50
39/39 [==============================] - 1s 19ms/step - loss: 0.3543 - accuracy: 0.8862 - val_loss: 0.4954 - val_accuracy: 0.8942
Epoch 23/50
39/39 [==============================] - 1s 20ms/step - loss: 0.3125 - accuracy: 0.9135 - val_loss: 0.4833 - val_accuracy: 0.8622
Epoch 24/50
39/39 [==============================] - 1s 20ms/step - loss: 0.3007 - accuracy: 0.9255 - val_loss: 0.4197 - val_accuracy: 0.8974
Epoch 25/50
39/39 [==============================] - 1s 19ms/step - loss: 0.3035 - accuracy: 0.9103 - val_loss: 0.5513 - val_accuracy: 0.8494
Epoch 26/50
39/39 [==============================] - 1s 20ms/step - loss: 0.2912 - accuracy: 0.9183 - val_loss: 0.3529 - val_accuracy: 0.9071
Epoch 27/50
39/39 [==============================] - 1s 20ms/step - loss: 0.2154 - accuracy: 0.9359 - val_loss: 0.3320 - val_accuracy: 0.9167
Epoch 28/50
39/39 [==============================] - 1s 19ms/step - loss: 0.2512 - accuracy: 0.9247 - val_loss: 0.3498 - val_accuracy: 0.9263
Epoch 29/50
39/39 [==============================] - 1s 19ms/step - loss: 0.2782 - accuracy: 0.9143 - val_loss: 0.3629 - val_accuracy: 0.9103
Epoch 30/50
39/39 [==============================] - 1s 20ms/step - loss: 0.2419 - accuracy: 0.9319 - val_loss: 0.3893 - val_accuracy: 0.9006
Epoch 31/50
39/39 [==============================] - 1s 19ms/step - loss: 0.2274 - accuracy: 0.9423 - val_loss: 0.3554 - val_accuracy: 0.9071
Epoch 32/50
39/39 [==============================] - 1s 20ms/step - loss: 0.2472 - accuracy: 0.9247 - val_loss: 0.4495 - val_accuracy: 0.8910
Epoch 33/50
39/39 [==============================] - 1s 19ms/step - loss: 0.2370 - accuracy: 0.9303 - val_loss: 0.4463 - val_accuracy: 0.8910
Epoch 34/50
39/39 [==============================] - 1s 19ms/step - loss: 0.4310 - accuracy: 0.8782 - val_loss: 0.5778 - val_accuracy: 0.8526
Epoch 35/50
39/39 [==============================] - 1s 19ms/step - loss: 0.2488 - accuracy: 0.9231 - val_loss: 0.3673 - val_accuracy: 0.9135
Epoch 36/50
39/39 [==============================] - 1s 20ms/step - loss: 0.1405 - accuracy: 0.9671 - val_loss: 0.2828 - val_accuracy: 0.9327
Epoch 37/50
39/39 [==============================] - 1s 19ms/step - loss: 0.1237 - accuracy: 0.9752 - val_loss: 0.2936 - val_accuracy: 0.9391
Epoch 38/50
39/39 [==============================] - 1s 20ms/step - loss: 0.1178 - accuracy: 0.9752 - val_loss: 0.3032 - val_accuracy: 0.9071
Epoch 39/50
39/39 [==============================] - 1s 20ms/step - loss: 0.1185 - accuracy: 0.9760 - val_loss: 0.2329 - val_accuracy: 0.9423
Epoch 40/50
39/39 [==============================] - 1s 19ms/step - loss: 0.1223 - accuracy: 0.9752 - val_loss: 0.2493 - val_accuracy: 0.9359
Epoch 41/50
39/39 [==============================] - 1s 20ms/step - loss: 0.1031 - accuracy: 0.9800 - val_loss: 0.2102 - val_accuracy: 0.9519
Epoch 42/50
39/39 [==============================] - 1s 19ms/step - loss: 0.1221 - accuracy: 0.9720 - val_loss: 0.2779 - val_accuracy: 0.9199
Epoch 43/50
39/39 [==============================] - 1s 19ms/step - loss: 0.1035 - accuracy: 0.9800 - val_loss: 0.2338 - val_accuracy: 0.9327
Epoch 44/50
39/39 [==============================] - 1s 19ms/step - loss: 0.0843 - accuracy: 0.9864 - val_loss: 0.2895 - val_accuracy: 0.9423
Epoch 45/50
39/39 [==============================] - 1s 19ms/step - loss: 0.1095 - accuracy: 0.9800 - val_loss: 0.2284 - val_accuracy: 0.9487
Epoch 46/50
39/39 [==============================] - 1s 19ms/step - loss: 0.0976 - accuracy: 0.9792 - val_loss: 0.3559 - val_accuracy: 0.9038
Epoch 47/50
39/39 [==============================] - 1s 19ms/step - loss: 0.0874 - accuracy: 0.9840 - val_loss: 0.3029 - val_accuracy: 0.9263
Epoch 48/50
39/39 [==============================] - 1s 20ms/step - loss: 0.0977 - accuracy: 0.9800 - val_loss: 0.2959 - val_accuracy: 0.9327
Epoch 49/50
39/39 [==============================] - 1s 19ms/step - loss: 0.0856 - accuracy: 0.9832 - val_loss: 0.2276 - val_accuracy: 0.9423
Epoch 50/50
39/39 [==============================] - 1s 19ms/step - loss: 0.0783 - accuracy: 0.9904 - val_loss: 0.2576 - val_accuracy: 0.9359
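
ModelCheckpoint monitors val_loss by default, so BrailleNet.h5 holds the weights from the best epoch rather than the last one. A short sketch to evaluate that checkpoint (load_model was imported above):

# Evaluate the best checkpoint (lowest validation loss) saved during training
best_model = load_model('BrailleNet.h5')
val_loss, val_acc = best_model.evaluate(val_generator, verbose=0)
print('best-checkpoint val_loss=%.4f, val_accuracy=%.4f' % (val_loss, val_acc))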

Visualize the accuracy and loss to check whether the model is overfitting

In [30]:
sns.set()

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)

# Accuracy plot
plt.plot(epochs, acc, color='green', label='Training Accuracy')
plt.plot(epochs, val_acc, color='blue', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()

plt.figure()
# Loss plot
plt.plot(epochs, loss, color='green', label='Training Loss')
plt.plot(epochs, val_loss, color='red', label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.show()
In [31]:
pred = model.predict(val_generator)
pred
Out[31]:
array([[3.5818102e-06, 6.0681202e-08, 1.4169400e-05, ..., 1.0753806e-06,
        4.1025424e-13, 3.1398381e-03],
       [4.8559308e-16, 3.6052746e-18, 1.1520788e-16, ..., 1.7023364e-02,
        9.6547413e-01, 5.3938215e-06],
       [1.0701818e-22, 6.2353514e-20, 2.2532765e-21, ..., 2.7852018e-10,
        7.4175697e-16, 2.3607448e-08],
       ...,
       [4.5034299e-03, 3.4636867e-07, 2.8374844e-10, ..., 7.3189068e-12,
        2.2838371e-19, 1.3361260e-14],
       [8.6784920e-13, 2.7956071e-08, 1.4969432e-19, ..., 1.5051525e-14,
        1.2693156e-11, 1.1765985e-16],
       [1.4716119e-01, 1.2941795e-03, 1.3852440e-01, ..., 6.5823588e-05,
        4.3821038e-07, 2.9651492e-06]], dtype=float32)
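
Each row of pred is a softmax distribution over the 26 letters. Taking the argmax of each row and inverting class_indices recovers the predicted characters; note that flow_from_directory shuffles by default, so to line these up with val_generator.classes you would recreate the validation generator with shuffle=False. A sketch:

# Map class indices back to letters and decode the predictions
idx_to_letter = {v: k for k, v in val_generator.class_indices.items()}
predicted_letters = [idx_to_letter[i] for i in np.argmax(pred, axis=1)]
print(predicted_letters[:10])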
In [32]:
model.save('braille.h5')
In [33]:
!deepCC braille.h5
[INFO]
Reading [keras model] 'braille.h5'
[SUCCESS]
Saved 'braille_deepC/braille.onnx'
[INFO]
Reading [onnx model] 'braille_deepC/braille.onnx'
[INFO]
Model info:
  ir_vesion : 5
  doc       : 
[WARNING]
[ONNX]: terminal (input/output) input_2's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_5's shape is less than 1. Changing it to 1.
WARN (GRAPH): found operator node with the same name (dense_5) as io node.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'braille_deepC/braille.cpp'
[INFO]
deepSea model files are ready in 'braille_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "braille_deepC/braille.cpp" -D_AITS_MAIN -o "braille_deepC/braille.exe"
[RUNNING COMMAND]
size "braille_deepC/braille.exe"
   text	   data	    bss	    dec	    hex	filename
 706113	   4360	    760	 711233	  ada41	braille_deepC/braille.exe
[SUCCESS]
Saved model as executable "braille_deepC/braille.exe"