Quick Start¶
Credit: AITS Cainvas Community
Photo by Sandipan Dey on SandipanWeb
- Use the MNIST dataset,
- Train a NN model with TensorFlow-Keras, and
- Compile the NN model with deepC
To run a Code Cell, click the ⏯ Run button in the Navigation Bar above or press Shift + Enter.
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
# Print some information about the current environment
print("TensorFlow version {}".format(tf.__version__))
print("Eager mode: ", tf.executing_eagerly())
print("Is GPU available: ",tf.config.list_physical_devices('GPU'))
Load the MNIST dataset:
mnist = tf.keras.datasets.mnist
(images_train, labels_train),(images_test, labels_test) = mnist.load_data()
class_names = ["zero","one","two","three","four","five","six","seven","eight","nine"]
What is our training data?¶
Training images¶
print("Data type:", type(images_train))
print("Dataset shape:", (images_train.shape))
This means we get an array of 60,000 training images, each 28x28 pixels.
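The test split follows the same layout, and the raw pixels are stored as 8-bit integers. A quick check (it only restates what the cells above already load):
print("Test set shape:", images_test.shape)   # (10000, 28, 28)
print("Pixel dtype:", images_train.dtype)     # uint8
print("Pixel range:", images_train.min(), "-", images_train.max())  # 0 - 255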
Labels¶
print("Labels:", len(labels_train))
print("Possible values:", np.unique(labels_train))
Example image¶
plt.figure()
plt.imshow(images_train[0])
plt.colorbar()
plt.grid(False)
plt.xlabel("Classification label: {}".format(labels_train[0]))
plt.show()
Normalization¶
Each pixel can have a value from 0 to 255, as shown above. Let's scale the values to floats between 0 and 1:
images_train = images_train / 255.0
images_test = images_test / 255.0
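A quick check that the scaling worked as intended (the division promotes the uint8 arrays to floating point):
print("Pixel range after scaling:", images_train.min(), "-", images_train.max())  # 0.0 - 1.0
print("dtype after scaling:", images_train.dtype)  # float64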
plt.figure()
plt.imshow(images_train[0])
plt.colorbar()
plt.grid(False)
plt.xlabel("Classification label: {}".format(labels_train[0]))
plt.show()
Note¶
It is worth mentioning that the input images look inverted (a white digit on a black background). MNIST stores each pixel as an ink intensity: the smaller a pixel's value, the whiter it is (0 means 100% white, 255 means 100% black), so a standard grayscale colormap renders the digit white on black.
This is important because when you plug the model into an Android app, you will need to prepare the input image in the same way (scale it down and invert the colors).
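A rough sketch of that preparation step (the names here are hypothetical; app_img stands for a 28x28 grayscale array captured in the app, with a black digit drawn on a white background):
import numpy as np
# Hypothetical app input: 28x28 uint8 array with a black digit (0) on a white background (255);
# here a blank white canvas is used as a placeholder for the real captured image
app_img = np.full((28, 28), 255, dtype=np.uint8)
# Scale to [0, 1] and invert so it matches the training data:
# the background becomes ~0 and the digit stroke becomes ~1
prepared = 1.0 - (app_img.astype(np.float32) / 255.0)
model_input = np.expand_dims(prepared, 0)  # add the batch dimension expected by model.predict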
plt.figure(figsize=(10, 10))
plt.subplot(3, 6, 1)
plt.xticks([])
plt.yticks([])
plt.xlabel("Inverse")
plt.imshow(images_train[0], cmap=plt.cm.binary)
plt.subplot(3, 6, 2)
plt.xticks([])
plt.yticks([])
plt.xlabel("Original")
plt.imshow(images_train[0], cmap=plt.cm.gray)
NN Model Creation¶
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28), name="flat_1"),
    keras.layers.Dense(100, activation=tf.nn.relu, name="dense_1"),
    keras.layers.Dropout(0.2, name="dropout_1"),
    keras.layers.Dense(10, activation=tf.nn.softmax, name="dense_2")
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
Display NN Model Architecture¶
model.summary()
tf.keras.utils.plot_model(
    model, to_file='model.png', show_shapes=False, show_layer_names=True,
    rankdir='TB', expand_nested=False, dpi=96
)
Model training¶
model.fit(images_train, labels_train, epochs=16)
test_loss, test_acc = model.evaluate(images_test, labels_test)
print('Test accuracy:', test_acc)
Model evaluation¶
Let's take one image from the test set:
example_img = images_test[0]
# Save the raw pixel values to a file; it is used later as input to the deepC-compiled executable
np.savetxt('img.data', example_img.flatten())
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(example_img, cmap=plt.cm.binary)
Prediction¶
What does our prediction say?
example_img_as_input = np.expand_dims(example_img, 0)
print("Image data shape:", example_img_as_input.shape)
img_prediction = model.predict(example_img_as_input)
print("Prediction results:", img_prediction)
print("Predicted value:", np.argmax(img_prediction))
Visualise more predictions. Plot the first 15 test images with their predicted and true labels. Correct predictions are shown in blue, incorrect ones in red.
# Helper functions
import matplotlib.pyplot as plt
import numpy as np

def plot_image(i, predictions_array, true_label, img, class_names):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                         100 * np.max(predictions_array),
                                         class_names[true_label]),
               color=color)

def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
predictions = model.predict(images_test)
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, labels_test, images_test, class_names)
    # plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    # plot_value_array(i, predictions, labels_test)
Test Accuracy¶
loss, acc = model.evaluate(images_test, labels_test)
print("Model accuracy: {:5.2f}%".format(100 * acc))
print("Model loss: {}".format(loss))
Save Model¶
- TF PB format
- TF Lite format
- TF Lite Quantized
# Save the Keras model in the TensorFlow SavedModel format
saved_model_dir = 'mnist_model.TF'
model.save(saved_model_dir)

# Convert the SavedModel to TF Lite format
tflite_model_file = "converted_model.tflite"
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open(tflite_model_file, "wb").write(tflite_model)
# Convert again with default optimizations enabled (post-training quantization)
tflite_quantized_model_file = "converted_quantized_model.tflite"
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quantized_model = converter.convert()
open(tflite_quantized_model_file, "wb").write(tflite_quantized_model)
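Optionally, the converted (non-quantized) TF Lite model can be sanity-checked with tf.lite.Interpreter before moving on (a minimal sketch; it reuses the example image prepared earlier):
interpreter = tf.lite.Interpreter(model_path=tflite_model_file)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Feed the same example image used earlier (float32, with a batch dimension)
interpreter.set_tensor(input_details[0]['index'], example_img_as_input.astype(np.float32))
interpreter.invoke()
tflite_prediction = interpreter.get_tensor(output_details[0]['index'])
print("TFLite predicted value:", np.argmax(tflite_prediction))

Compile with deepC¶
Finally, compile the saved TensorFlow model with deepC and run the generated executable on the image saved earlier in img.data; the model output is written to Identity_0.out: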
!deepCC mnist_model.TF --format=tensorflow
!./mnist_model.TF_deepC/mnist_model.TF.exe img.data
nn_out = np.loadtxt('Identity_0.out')
print ("Model prediction is DIGIT : ", np.argmax(nn_out))