Importing necessary libraries¶
In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import os
Reading images and labels for training¶
In [2]:
!wget -N "https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/dataset_ztOhYU1.zip"
!unzip -qo dataset_ztOhYU1.zip
!rm dataset_ztOhYU1.zip
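A quick sanity check that the archive extracted as expected (a minimal sketch, assuming it unpacks into a dataset/ folder, which the paths below rely on):
In [ ]:
import os
print(os.listdir('dataset'))  # expect train_images/, test_images/ and the two label CSVs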
Reading train labels¶
In [3]:
df_train = pd.read_csv('dataset/train_labels.csv', index_col='images')
df_train
Out[3]:
Reading test labels¶
In [4]:
test_df = pd.read_csv('dataset/test_labels.csv', index_col='images')
test_df
Out[4]:
In [5]:
# reading train and test images from their folders and stacking them while keeping track of the corresponding labels
dataset_folder = 'dataset'
train_images = []
train_labels = []
test_images = []
test_labels = []
for folder in os.listdir(dataset_folder):
    folder_path = os.path.join(dataset_folder, folder)
    if folder == 'train_images':
        for file in os.listdir(folder_path):
            if file.endswith('jpg'):
                img_path = os.path.join(folder_path, file)
                img = cv2.imread(img_path)
                train_images.append(img)
                train_labels.append(df_train.loc[file, 'labels'])
    elif folder == 'test_images':
        for file in os.listdir(folder_path):
            if file.endswith('jpg'):
                img_path = os.path.join(folder_path, file)
                img = cv2.imread(img_path)
                test_images.append(img)
                test_labels.append(test_df.loc[file, 'labels'])
train_images = np.array(train_images)
train_labels = np.array(train_labels)
test_images = np.array(test_images)
test_labels = np.array(test_labels)
print('Shape of stacked train images:', train_images.shape)
print('Shape of train labels:', train_labels.shape)
print('Shape of stacked test images:', test_images.shape)
print('Shape of test labels:', test_labels.shape)
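Before training, it is also worth checking how balanced the two classes are; a minimal sketch using the already-imported numpy:
In [ ]:
labels, counts = np.unique(train_labels, return_counts=True)
for label, count in zip(labels, counts):
    print(f'label {label}: {count} images')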
Visualizing some images together with their labels to get a feel for the data¶
In [6]:
# Function to convert a binary label into text
def get_label(num):
    if num == 0:
        return 'NOT FALL'
    elif num == 1:
        return 'FALL'
    else:
        return -1
In [7]:
fig, axes = plt.subplots(1, 2, figsize=(10, 8), squeeze=False)
# cv2.imread returns BGR, so reverse the channel order for correct colors in matplotlib
axes[0][0].imshow(train_images[2][..., ::-1])
axes[0][0].set_title(get_label(train_labels[2]))
axes[0][1].imshow(train_images[3][..., ::-1])
axes[0][1].set_title(get_label(train_labels[3]));
Splitting our data into train and validation sets, building and training our model¶
In [8]:
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, stratify=train_labels, test_size=0.2)
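Since stratify=train_labels was passed, both splits should keep the same FALL/NOT FALL proportions; a quick check (a minimal sketch, assuming the labels are the integers 0 and 1):
In [ ]:
print('train class balance:', np.bincount(y_train) / len(y_train))
print('val class balance:  ', np.bincount(y_val) / len(y_val))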
In [9]:
def conv2d(filters: int, name):
    # Conv2D and l2 are imported in the next cell; Python resolves these names
    # when the function is first called, after those imports have run
    return Conv2D(filters=filters, kernel_size=(3, 3), padding='same',
                  kernel_regularizer=l2(0.), bias_regularizer=l2(0.), name=name)
In [10]:
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, ReLU
from tensorflow.keras.activations import sigmoid
from tensorflow.keras.regularizers import l2
# fallnet architecture
model_input = Input(shape=(X_train.shape[1], X_train.shape[2], X_train.shape[3]), name='inputs')
conv1 = conv2d(16, name='convolution_1')(model_input)
act1 = ReLU(name='activation_1')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2), name='pooling_1')(act1)
conv2 = conv2d(16, name='convolution_2')(pool1)
act2 = ReLU(name='activation_2')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2), name='pooling_2')(act2)
conv3 = conv2d(32, name='convolution_3')(pool2)
act3 = ReLU(name='activation_3')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2), name='pooling_3')(act3)
conv4 = conv2d(32, name='convolution_4')(pool3)
act4 = ReLU(name='activation_4')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2), name='pooling_4')(act4)
conv5 = conv2d(64, name='convolution_5')(pool4)
act5 = ReLU(name='activation_5')(conv5)
pool5 = MaxPooling2D(pool_size=(2, 2), name='pooling_5')(act5)
conv6 = conv2d(64, name='convolution_6')(pool5)
act6 = ReLU(name='activation_6')(conv6)
pool6 = MaxPooling2D(pool_size=(2, 2), name='pooling_6')(act6)
flat = Flatten(name='flatten')(pool6)
dense1 = Dense(32, name='dense1')(flat)
output = Dense(1, activation='sigmoid', name='output')(dense1)
model = Model(inputs=[model_input], outputs=[output])
model.summary()
In [11]:
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.005), loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=6, validation_data=(X_val, y_val))
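An optional variation (a sketch only, not part of the run whose curves are plotted below): train for more epochs and let Keras stop early once the validation loss stops improving.
In [ ]:
# optional: longer training with early stopping (would replace the 6-epoch run above)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                              restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_val, y_val), callbacks=[early_stop])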
Accuracy/Loss vs Epochs¶
In [12]:
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
model.save('model.h5')
print('Model saved.')
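To confirm the saved file is usable, a minimal sketch that loads it back and re-evaluates on the validation set:
In [ ]:
reloaded = tf.keras.models.load_model('model.h5')
loss, acc = reloaded.evaluate(X_val, y_val)
print(f'Reloaded model validation accuracy: {acc:.3f}')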
Testing our model¶
In [13]:
predicted_labels = (model.predict(test_images) >= 0.5).astype('int64').flatten()
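Beyond spot-checking a few images below, the predictions can be scored against the ground-truth test labels; a minimal sketch with numpy, assuming the labels are the integers 0 and 1:
In [ ]:
test_acc = (predicted_labels == test_labels).mean()
print(f'Test accuracy: {test_acc:.3f}')
# simple confusion counts
for actual in (0, 1):
    for pred in (0, 1):
        n = np.sum((test_labels == actual) & (predicted_labels == pred))
        print(f'actual {get_label(actual)}, predicted {get_label(pred)}: {n}')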
In [14]:
# visualizing our results
row = 3
col = 4
fig, axes = plt.subplots(row, col, figsize=(16, 14))
c = 0
for i in range(row):
    for j in range(col):
        # reverse BGR (OpenCV) to RGB for correct colors in matplotlib
        axes[i][j].imshow(test_images[c][..., ::-1])
        axes[i][j].set_title(f'Predicted: {get_label(predicted_labels[c])}', fontsize=14)
        axes[i][j].set_xlabel(f'Actual: {get_label(test_labels[c])}', fontsize=14)
        c += 1