200 - First perceptron with PyTorch

Links: notebook, html, PDF, python, slides, GitHub

A first perceptron on the MNIST database.

Note: install tqdm if it is not already installed: !pip install tqdm

%matplotlib inline
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from tqdm import tqdm
print("torch", torch.__version__)
torch 0.4.0
BATCH_SIZE = 64
TEST_BATCH_SIZE = 64
DATA_DIR = 'data/'
USE_CUDA = True
N_EPOCHS = 100
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(DATA_DIR, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(DATA_DIR, train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=TEST_BATCH_SIZE, shuffle=True)
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
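The constants 0.1307 and 0.3081 passed to Normalize above are the global mean and standard deviation of the MNIST training pixels. They can be recomputed as a sanity check (a minimal sketch; it loads the whole training set into memory at once):

# Recompute the mean/std used by transforms.Normalize above
# (assumes the dataset was already downloaded to DATA_DIR).
raw = datasets.MNIST(DATA_DIR, train=True, transform=transforms.ToTensor())
pixels = torch.stack([img for img, _ in raw])   # (60000, 1, 28, 28), values in [0, 1]
print(pixels.mean().item(), pixels.std().item())   # ~0.1307, ~0.3081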
data, target = next(iter(train_loader))
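A quick look at the shapes of the batch grabbed above:

print(data.shape)    # torch.Size([64, 1, 28, 28]) with BATCH_SIZE = 64
print(target.shape)  # torch.Size([64])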
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # A single hidden layer: 784 pixels -> 50 units -> 10 classes.
        self.fc1 = nn.Linear(28*28, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = x.view(-1, 28*28)           # flatten each 28x28 image
        x = torch.tanh(self.fc1(x))     # F.tanh is deprecated
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1)
model = Net()

if USE_CUDA:
    try:
        model = model.cuda()
    except Exception as e:
        print(e)
        USE_CUDA = False
        N_EPOCHS = 5  # fall back to fewer epochs when training on CPU
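With 28*28 = 784 inputs, the network holds 784*50 + 50 = 39,250 parameters in fc1 and 50*10 + 10 = 510 in fc2, 39,760 in total. A one-liner to confirm:

print(sum(p.numel() for p in model.parameters()))  # 39760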
optimizer = optim.Adam(model.parameters())
def train(epoch, verbose=True):
    model.train()
    losses = []
    loader = tqdm(train_loader, total=len(train_loader))
    for batch_idx, (data, target) in enumerate(loader):
        if USE_CUDA:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        if verbose and batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    return np.mean(losses)
def test(verbose=True):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # replaces the deprecated volatile=True
        for data, target in test_loader:
            if USE_CUDA:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # reduction='sum' replaces the deprecated size_average=False
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()

    test_loss /= len(test_loader.dataset)
    if verbose:
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    return [float(test_loss), correct]
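Before any training, the freshly initialized network should score near chance, roughly 1,000 of 10,000 correct across the ten classes. A quick sketch to verify (the names loss0, correct0 are arbitrary):

# Accuracy of the untrained model (expected around 10%).
loss0, correct0 = test(verbose=False)
print(loss0, correct0)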
perfs = []
for epoch in range(1, N_EPOCHS + 1):
    t0 = time.time()
    train_loss = train(epoch, verbose=False)
    test_loss, correct = test(verbose=False)
    perfs.append([epoch, train_loss, test_loss,
                  correct, len(test_loader.dataset), time.time() - t0])
    print("epoch {}: train loss {:.4f}, test loss {:.4f}, accuracy {}/{} in {:.2f}s".format(*perfs[-1]))
100%|██████████| 938/938 [00:10<00:00, 87.55it/s]
epoch 1: train loss 0.2731, test loss 0.1773, accuracy 9480/10000 in 12.18s
100%|██████████| 938/938 [00:10<00:00, 89.61it/s]
epoch 2: train loss 0.2670, test loss 0.1744, accuracy 9477/10000 in 11.92s
100%|██████████| 938/938 [00:10<00:00, 90.66it/s]
epoch 3: train loss 0.2677, test loss 0.1720, accuracy 9475/10000 in 11.78s
100%|██████████| 938/938 [00:10<00:00, 91.44it/s]
epoch 4: train loss 0.2602, test loss 0.1774, accuracy 9442/10000 in 11.72s
100%|██████████| 938/938 [00:10<00:00, 87.40it/s]
epoch 5: train loss 0.2559, test loss 0.1628, accuracy 9499/10000 in 12.23s
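Once the loop is done, the trained weights can be saved for reuse (a minimal sketch; the file name mnist_perceptron.pt is an arbitrary choice):

# Persist the trained weights; reload later with model.load_state_dict(...)
torch.save(model.state_dict(), "mnist_perceptron.pt")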
df_perfs = pd.DataFrame(perfs, columns=["epoch", "train_loss", "test_loss",
                                        "accuracy", "n_test", "time"])
df_perfs
   epoch  train_loss  test_loss  accuracy  n_test       time
0      1    0.273088   0.177300      9480   10000  12.177797
1      2    0.267026   0.174420      9477   10000  11.916903
2      3    0.267738   0.171986      9475   10000  11.780004
3      4    0.260222   0.177376      9442   10000  11.715527
4      5    0.255861   0.162816      9499   10000  12.226404
df_perfs[["train_loss", "test_loss"]].plot();
[figure: train_loss and test_loss per epoch]
df_perfs[["train_loss", "test_loss"]].plot(ylim=(0, 0.2));
[figure: train_loss and test_loss per epoch, y-axis limited to (0, 0.2)]
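As a last check, the trained model can classify a single test image (a minimal sketch using one batch from test_loader):

model.eval()
data, target = next(iter(test_loader))
if USE_CUDA:
    data = data.cuda()
with torch.no_grad():
    pred = model(data).max(1)[1]  # predicted class for each image in the batch
print("predicted:", pred[0].item(), "- true:", target[0].item())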