In [1]:
import numpy as np, matplotlib.pyplot as plt
import os

from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Activation, Dropout, BatchNormalization, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical

Input Data

In [2]:
train = ImageDataGenerator(rescale = 1/255)
test = ImageDataGenerator(rescale = 1/255)

train_dataset = train.flow_from_directory(directory='/home/jupyter-SiddharthGan/week5/seg_train/seg_train/',
                                         target_size=(50,50), shuffle=True)

test_dataset = test.flow_from_directory(directory='/home/jupyter-SiddharthGan/week5/seg_test/seg_test/',
                                         target_size=(50,50), shuffle=True)
Found 14034 images belonging to 6 classes.
Found 3000 images belonging to 6 classes.
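
A quick sketch (assuming the generator defaults used above: batch_size=32 and categorical one-hot labels) to confirm what each batch yields before training.

images, labels = train_dataset[0]      # first batch from the generator
print(images.shape)                    # expected (32, 50, 50, 3)
print(labels.shape)                    # expected (32, 6) one-hot rows
print(images.min(), images.max())      # pixels rescaled into [0, 1]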

Data Visualization

In [3]:
# Pick 10 random positions within the first batch (default batch size is 32)
indices = np.random.randint(0, 32, size=10)

plt.figure(figsize=(20,8))
for plot_pos, idx in enumerate(indices):
    plt.subplot(2, 5, plot_pos + 1)
    plt.imshow(train_dataset[0][0][idx])
    plt.title(np.argmax(train_dataset[0][1][idx]))   # class index of the one-hot label
plt.show()
In [4]:
# Map each class index back to its class name
mappings = {index: name for name, index in train_dataset.class_indices.items()}
mappings
Out[4]:
{0: 'buildings',
 1: 'forest',
 2: 'glacier',
 3: 'mountain',
 4: 'sea',
 5: 'street'}
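
A short sketch showing how this mapping decodes a one-hot label from a batch back into a readable class name (argmax picks the hot index).

sample_images, sample_labels = train_dataset[0]
print(mappings[int(np.argmax(sample_labels[0]))])   # e.g. 'forest'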

Model Architecture

In [5]:
# Initializing the model
model = Sequential()

# Block 1: two 3x3 convolutions with 32 filters, dropout, then 2x2 max pooling
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', input_shape=(50,50,3)))
model.add(Activation('relu'))
model.add(Conv2D(filters=32, kernel_size=(3,3)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))

# Block 2: two 3x3 convolutions with 50 filters, dropout, then 2x2 max pooling
model.add(Conv2D(filters=50, kernel_size=(3,3)))
model.add(Activation('relu'))
model.add(Conv2D(filters=50, kernel_size=(3,3)))
model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))

# Classifier head: flatten, two dense layers (the first L1/L2-regularized),
# dropout, and a 6-way softmax output
model.add(Flatten())
kernel_regularizer = keras.regularizers.l1_l2(l1=1e-5, l2=1e-4)
model.add(Dense(units=40, activation='relu', kernel_regularizer=kernel_regularizer))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(6, activation='softmax'))

model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 50, 50, 32)        896       
_________________________________________________________________
activation (Activation)      (None, 50, 50, 32)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 48, 48, 32)        9248      
_________________________________________________________________
dropout (Dropout)            (None, 48, 48, 32)        0         
_________________________________________________________________
activation_1 (Activation)    (None, 48, 48, 32)        0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 24, 24, 32)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 22, 22, 50)        14450     
_________________________________________________________________
activation_2 (Activation)    (None, 22, 22, 50)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 20, 20, 50)        22550     
_________________________________________________________________
dropout_1 (Dropout)          (None, 20, 20, 50)        0         
_________________________________________________________________
activation_3 (Activation)    (None, 20, 20, 50)        0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 10, 10, 50)        0         
_________________________________________________________________
flatten (Flatten)            (None, 5000)              0         
_________________________________________________________________
dense (Dense)                (None, 40)                200040    
_________________________________________________________________
dense_1 (Dense)              (None, 50)                2050      
_________________________________________________________________
dropout_2 (Dropout)          (None, 50)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 6)                 306       
=================================================================
Total params: 249,540
Trainable params: 249,540
Non-trainable params: 0
_________________________________________________________________
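
The parameter counts above can be checked by hand: a Conv2D layer has kernel_height * kernel_width * input_channels * filters weights plus one bias per filter, and a Dense layer has inputs * units weights plus one bias per unit.

print(3*3*3*32 + 32)      # conv2d   -> 896
print(3*3*32*32 + 32)     # conv2d_1 -> 9248
print(3*3*32*50 + 50)     # conv2d_2 -> 14450
print(3*3*50*50 + 50)     # conv2d_3 -> 22550
print(5000*40 + 40)       # dense    -> 200040
print(40*50 + 50)         # dense_1  -> 2050
print(50*6 + 6)           # dense_2  -> 306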

Training the model

In [6]:
# Compile and train; the batch size comes from the generators (32 by default),
# so fit's batch_size argument is not needed here
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(train_dataset, epochs=12, validation_data=test_dataset)
Epoch 1/12
439/439 [==============================] - 6s 13ms/step - loss: 1.1947 - accuracy: 0.5281 - val_loss: 1.0033 - val_accuracy: 0.6203
Epoch 2/12
439/439 [==============================] - 5s 12ms/step - loss: 0.9583 - accuracy: 0.6313 - val_loss: 0.9135 - val_accuracy: 0.6580
Epoch 3/12
439/439 [==============================] - 5s 12ms/step - loss: 0.8522 - accuracy: 0.6857 - val_loss: 0.9274 - val_accuracy: 0.6527
Epoch 4/12
439/439 [==============================] - 5s 12ms/step - loss: 0.7634 - accuracy: 0.7282 - val_loss: 0.7646 - val_accuracy: 0.7413
Epoch 5/12
439/439 [==============================] - 5s 12ms/step - loss: 0.6640 - accuracy: 0.7716 - val_loss: 0.6797 - val_accuracy: 0.7597
Epoch 6/12
439/439 [==============================] - 5s 12ms/step - loss: 0.6158 - accuracy: 0.7926 - val_loss: 0.6762 - val_accuracy: 0.7783
Epoch 7/12
439/439 [==============================] - 5s 12ms/step - loss: 0.5626 - accuracy: 0.8172 - val_loss: 0.7244 - val_accuracy: 0.7550
Epoch 8/12
439/439 [==============================] - 5s 12ms/step - loss: 0.5384 - accuracy: 0.8221 - val_loss: 0.6259 - val_accuracy: 0.7990
Epoch 9/12
439/439 [==============================] - 5s 12ms/step - loss: 0.4904 - accuracy: 0.8427 - val_loss: 0.6419 - val_accuracy: 0.7813
Epoch 10/12
439/439 [==============================] - 5s 12ms/step - loss: 0.4630 - accuracy: 0.8478 - val_loss: 0.6432 - val_accuracy: 0.7807
Epoch 11/12
439/439 [==============================] - 5s 12ms/step - loss: 0.4329 - accuracy: 0.8650 - val_loss: 0.6952 - val_accuracy: 0.7747
Epoch 12/12
439/439 [==============================] - 5s 12ms/step - loss: 0.4034 - accuracy: 0.8742 - val_loss: 0.6637 - val_accuracy: 0.7817
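
As a quick cross-check (a sketch using the same test_dataset as above, not part of the original run), Keras' evaluate reports the final test loss and accuracy directly instead of reading them off the last epoch's log.

test_loss, test_acc = model.evaluate(test_dataset)
print(f"test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}")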

Accuracy and Loss Graphs

In [7]:
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
Out[7]:
<matplotlib.legend.Legend at 0x7fed24beda90>
In [8]:
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label = 'val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='lower right')
Out[8]:
<matplotlib.legend.Legend at 0x7fecc1a52e10>
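
Optionally, the two curves can be drawn side by side in a single figure; this is only a presentation tweak, not part of the original notebook.

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(history.history['accuracy'], label='accuracy')
ax1.plot(history.history['val_accuracy'], label='val_accuracy')
ax1.set_xlabel('Epoch'); ax1.set_ylabel('Accuracy'); ax1.legend()
ax2.plot(history.history['loss'], label='loss')
ax2.plot(history.history['val_loss'], label='val_loss')
ax2.set_xlabel('Epoch'); ax2.set_ylabel('Loss'); ax2.legend()
plt.show()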

Prediction

In [9]:
# Forming the prediction dataset
predict = ImageDataGenerator(rescale = 1/255)

predict_dataset = predict.flow_from_directory('/home/jupyter-SiddharthGan/week5/seg_pred/',
                                             target_size=(50,50), shuffle=True, batch_size=7301)
Found 7301 images belonging to 1 classes.
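
Because batch_size=7301 packs the whole prediction folder into a single batch, predict_dataset[0][0] holds every image. A sketch (assuming enough memory for one large batch) of predicting them all at once and tallying the predicted classes:

all_preds = np.argmax(model.predict(predict_dataset[0][0]), axis=1)
unique, counts = np.unique(all_preds, return_counts=True)
print({mappings[int(u)]: int(c) for u, c in zip(unique, counts)})
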
In [10]:
# Predict the class of the image at the given index and display it

index = 25
tar = np.argmax(model.predict(predict_dataset[0][0][index].reshape(-1,50,50,3)))
cls = mappings[int(tar)]    # look the predicted index up directly in the mapping

plt.imshow(predict_dataset[0][0][index])
plt.title(cls)
plt.show()
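
The same lookup extends to a small grid of predictions; this sketch simply reuses the predict_dataset batch and the mappings dictionary from above.

plt.figure(figsize=(20, 8))
for plot_pos in range(10):
    img = predict_dataset[0][0][plot_pos]
    pred_cls = mappings[int(np.argmax(model.predict(img.reshape(-1, 50, 50, 3))))]
    plt.subplot(2, 5, plot_pos + 1)
    plt.imshow(img)
    plt.title(pred_cls)
plt.show()
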
In [11]:
model.save('model.h5')
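
A minimal round-trip check (a sketch, not part of the original run): reload the saved HDF5 file with Keras' load_model and confirm it reproduces the in-memory model's prediction.

from tensorflow.keras.models import load_model
reloaded = load_model('model.h5')
sample = predict_dataset[0][0][index].reshape(-1, 50, 50, 3)
print(np.allclose(model.predict(sample), reloaded.predict(sample)))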

DeepCC

In [12]:
!deepCC model.h5
[INFO]
Reading [keras model] 'model.h5'
[SUCCESS]
Saved 'model_deepC/model.onnx'
[INFO]
Reading [onnx model] 'model_deepC/model.onnx'
[INFO]
Model info:
  ir_vesion : 5
  doc       : 
[WARNING]
[ONNX]: graph-node conv2d's attribute auto_pad has no meaningful data.
[WARNING]
[ONNX]: terminal (input/output) conv2d_input's shape is less than 1. Changing it to 1.
[WARNING]
[ONNX]: terminal (input/output) dense_2's shape is less than 1. Changing it to 1.
WARN (GRAPH): found operator node with the same name (dense_2) as io node.
[INFO]
Running DNNC graph sanity check ...
[SUCCESS]
Passed sanity check.
[INFO]
Writing C++ file 'model_deepC/model.cpp'
[INFO]
deepSea model files are ready in 'model_deepC/' 
[RUNNING COMMAND]
g++ -std=c++11 -O3 -fno-rtti -fno-exceptions -I. -I/opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/include -isystem /opt/tljh/user/lib/python3.7/site-packages/deepC-0.13-py3.7-linux-x86_64.egg/deepC/packages/eigen-eigen-323c052e1731 "model_deepC/model.cpp" -D_AITS_MAIN -o "model_deepC/model.exe"
[RUNNING COMMAND]
size "model_deepC/model.exe"
   text	   data	    bss	    dec	    hex	filename
1170445	   3784	    760	1174989	 11edcd	model_deepC/model.exe
[SUCCESS]
Saved model as executable "model_deepC/model.exe"
In [ ]: