PyTorch MNIST Vision App¶
Credit: AITS Cainvas Community
Photo by Denis Dmitriev on YouTube
- Use MNIST Dataset,
- To Train NN model with PyTorch and
- Compile NN model with deepC
To run a Code Cell you can click on the ⏯ Run
button in the Navigation Bar above or type Shift + Enter
In [1]:
%pylab inline
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.dataloader as dataloader
import torch.optim as optim
from torch.utils.data import TensorDataset
from torch.autograd import Variable
from torchvision import transforms
from torchvision.datasets import MNIST
SEED = 1
# CUDA?
cuda = torch.cuda.is_available()
# For reproducibility
torch.manual_seed(SEED)
if cuda:
torch.cuda.manual_seed(SEED)
Dataset¶
Download, divide and inspect MNIST dataset
In [2]:
# Fetch the MNIST archive (wget -N skips the download when the remote file
# is unchanged), unpack it, then delete the archive to save disk space.
!wget -N 'https://cainvas-static.s3.amazonaws.com/media/user_data/cainvas-admin/MNIST.zip'
!unzip -o MNIST.zip
!rm MNIST.zip
In [3]:
MNIST_dataset = 'MNIST/data'

# ToTensor does min-max normalization (uint8 [0, 255] -> float [0, 1]).
# A single shared pipeline guarantees train and test preprocessing stay
# identical (the original duplicated the Compose in both calls).
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
])

train = MNIST(MNIST_dataset, train=True, download=True, transform=mnist_transform)
test = MNIST(MNIST_dataset, train=False, download=True, transform=mnist_transform)

# Create DataLoader: bigger batches, worker processes and pinned memory
# only when a GPU is available.
dataloader_args = dict(shuffle=True, batch_size=256, num_workers=4, pin_memory=True) if cuda else dict(shuffle=True, batch_size=64)
train_loader = dataloader.DataLoader(train, **dataloader_args)
test_loader = dataloader.DataLoader(test, **dataloader_args)
In [4]:
# Inspect the raw training tensor. `train.train_data` is deprecated in
# torchvision in favour of `train.data` (same underlying tensor).
train_data = train.data
# NOTE(review): applying ToTensor to the whole (60000, 28, 28) array treats
# it as ONE H x W x C image, so the result comes back transposed (channel
# axis first). Later cells rely on this layout — confirm before changing.
train_data = train.transform(train_data.numpy())

print('[Train]')
print(' - Numpy Shape:', train.data.cpu().numpy().shape)
print(' - Tensor Shape:', train.data.size())
print(' - min:', torch.min(train_data))
print(' - max:', torch.max(train_data))
print(' - mean:', torch.mean(train_data))
print(' - std:', torch.std(train_data))
print(' - var:', torch.var(train_data))
In [5]:
from PIL import Image

# Display one digit from the transformed training tensor. Because of the
# transposed layout produced by the previous cell, slicing the middle axis
# at a fixed index yields a single 28 x 28 image.
img_data = train_data.numpy()[:, 1, :]
imshow(img_data)
Out[5]:
Design NN Model¶
In [6]:
# One hidden Layer NN
class Model(nn.Module):
    """784 -> 100 -> 10 fully connected classifier.

    forward() returns per-class log-probabilities (log_softmax output).
    """

    def __init__(self):
        super(Model, self).__init__()
        # The original named this local `batch_size`, but it is the hidden
        # layer width — it has nothing to do with batching.
        hidden_units = 100
        self.fc = nn.Linear(784, hidden_units)
        self.fc2 = nn.Linear(hidden_units, 10)

    def forward(self, x):
        # Flatten any input whose element count is a multiple of 784.
        x = x.view((-1, 784))
        h = F.relu(self.fc(x))
        h = self.fc2(h)
        # Explicit dim=1 avoids the implicit-dim deprecation warning and
        # matches the implicit behaviour for 2-D input.
        return F.log_softmax(h, dim=1)
# Build the network, move it onto the GPU when one is present, and create
# the Adam optimizer over its parameters.
model = Model()
model = model.cuda() if cuda else model  # .cuda() moves params in place and returns the module
optimizer = optim.Adam(model.parameters(), lr=1e-3)
In [7]:
EPOCHS = 10
losses = []

for epoch in range(EPOCHS):
    # Re-enter training mode every epoch: the per-epoch evaluation below
    # switches the model to eval mode, and the original never switched back.
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Get Samples (Variable is deprecated; tensors carry autograd now)
        if cuda:
            data, target = data.cuda(), target.cuda()

        # Init
        optimizer.zero_grad()

        # Predict
        y_pred = model(data)

        # Calculate loss. The model already returns log-probabilities
        # (log_softmax), so the matching criterion is nll_loss; the original
        # used cross_entropy, which applies log_softmax a second time.
        loss = F.nll_loss(y_pred, target)
        losses.append(loss.item())

        # Backpropagation
        loss.backward()
        optimizer.step()

        # Display
        if batch_idx % 100 == 1:
            print('\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch + 1,
                EPOCHS,
                batch_idx * len(data),
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item()),
                end='')

    # Eval on the full test set. Scale the raw uint8 pixels to [0, 1] so
    # evaluation inputs match the ToTensor normalization used in training
    # (the original fed 0-255 floats to a model trained on [0, 1]).
    evaluate_x = test_loader.dataset.data.type_as(torch.FloatTensor()) / 255.0
    evaluate_y = test_loader.dataset.targets
    if cuda:
        evaluate_x, evaluate_y = evaluate_x.cuda(), evaluate_y.cuda()

    model.eval()
    with torch.no_grad():
        output = model(evaluate_x)
    pred = output.data.max(1)[1]
    d = pred.eq(evaluate_y.data).cpu()
    accuracy = d.sum().item() / d.numel()

    # `end=''` belongs to print(); in the original it was passed to
    # str.format(), which silently ignored it as an unused keyword.
    print('\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Test Accuracy: {:.4f}%'.format(
        epoch + 1,
        EPOCHS,
        len(train_loader.dataset),
        len(train_loader.dataset),
        100. * batch_idx / len(train_loader),
        loss.item(),
        accuracy * 100),
        end='')
In [8]:
# Plot the per-batch training loss curve (matplotlib `plot` from the setup cell).
plot(losses)
Out[8]:
Evaluate¶
In [9]:
# Final evaluation on the held-out test set.
# Scale the raw uint8 pixels to [0, 1]: training inputs went through
# ToTensor (min-max normalization), so feeding 0-255 floats (as the
# original did) would evaluate the model on a different input scale
# than it was trained on.
evaluate_x = test_loader.dataset.data.type_as(torch.FloatTensor()) / 255.0
evaluate_y = test_loader.dataset.targets
if cuda:
    evaluate_x, evaluate_y = evaluate_x.cuda(), evaluate_y.cuda()

model.eval()
with torch.no_grad():  # inference only — skip autograd bookkeeping
    output = model(evaluate_x)
pred = output.data.max(1)[1]
d = pred.eq(evaluate_y.data).cpu()
accuracy = d.sum().item() / d.numel()
print('Accuracy:', accuracy * 100.0)
Save Model¶
In [10]:
# Export with a single-sample dummy input. The original used
# `torch.randn(train_data.shape)` — the entire (transposed) training tensor —
# which traced a 60000-image batch into the exported graph, while the deepC
# deployment below feeds one 28x28 image at a time. The model's forward
# flattens to (-1, 784), so one 28x28 sample is sufficient.
dummy_input = torch.randn(1, 28, 28)
torch.onnx.export(model, (dummy_input), "./mnist_model.onnx", verbose=True)
Compile Model with deepC¶
In [11]:
# Compile the exported ONNX model with the deepC compiler.
!deepCC mnist_model.onnx
In [12]:
# NOTE(review): this path ('mnist.deepC/mnist.exe') does not match the model
# exported above ('mnist_model.onnx'); the prediction cell later uses
# './mnist_model_deepC/mnist_model.exe' — confirm which binary deepCC emits.
!./mnist.deepC/mnist.exe
Display an Image¶
In [13]:
# NOTE(review): `data` here is the leftover mini-batch variable from the
# training loop, so this picks the second image of the last batch seen —
# which image that is depends on shuffling; confirm this is intentional.
img_data = data[1][0].numpy();
# Save the flattened pixels as text for the compiled deepC model to read.
np.savetxt('img.data', img_data.flatten())
In [14]:
# Render the saved digit. `img_data` is a 2-D float array (ToTensor output,
# values in [0, 1]); Image.fromarray with mode 'RGB' — as in the original —
# misinterprets 2-D float data, since 'RGB' expects (H, W, 3) uint8.
# Scale to uint8 and use the 8-bit grayscale mode 'L' instead.
img = Image.fromarray((img_data * 255).astype(np.uint8), mode='L')
img.show()
imshow(img_data)
Out[14]:
Run model prediction¶
on the image shown above
In [15]:
# Run the deepC-compiled model on the image saved to img.data above.
!./mnist_model_deepC/mnist_model.exe img.data
In [16]:
# The deepC runtime writes the 10 class scores to '10.out';
# the predicted digit is the index of the largest score.
nn_out = np.loadtxt('10.out')
print ("Model prediction is DIGIT : ", np.argmax(nn_out))