# -*- coding: utf-8 -*-
"""hyperparam.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1515n22tDCNRtBYyO04N2jIlNAtjLEES0
Optimization of hyperparameter (learning rate)
"""
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
torch.manual_seed(14)
transform = transforms.ToTensor()
training_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
batch_size = 50
# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break
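# With batch_size = 50, the loop above should report X as (50, 1, 28, 28)
# float tensors and y as (50,) int64 class labels.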
# Define model
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        # Note: despite the tutorial-style name, this stack uses Sigmoid
        # activations rather than ReLU.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.Sigmoid(),
            nn.Linear(512, 512),
            nn.Sigmoid(),
            nn.Linear(512, 10)
        )

    def set_lr(self, lr):
        # Attach a fresh SGD optimizer with the requested learning rate.
        self.optimizer = torch.optim.SGD(self.parameters(), lr=lr)

    def forward(self, x):
        logits = self.linear_relu_stack(x)
        return logits

    def zero_grad(self):
        self.optimizer.zero_grad()

    def optimize(self):
        self.optimizer.step()
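# Design note: the model owns its optimizer, so each training run can pick a
# new learning rate via set_lr(); zero_grad() and optimize() simply forward to
# the wrapped torch.optim.SGD instance. (Defining zero_grad() here shadows
# nn.Module.zero_grad(), which is fine for this script since both clear the
# parameter gradients.)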
model = NeuralNetwork().to(device)
print(model)
loss_fn = torch.nn.CrossEntropyLoss()
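# CrossEntropyLoss combines log-softmax and negative log-likelihood, so the
# model's forward() correctly returns raw logits with no final softmax.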
def train(dataloader, model, loss_fn, lr):
    size = len(dataloader.dataset)
    model.set_lr(lr)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        X = torch.flatten(X, 1, 3)  # reshape X from (50, 1, 28, 28) to (50, 28*28)
        # Compute prediction
        pred = model(X)
        # Compute loss
        loss = loss_fn(pred, y)
        # Backpropagation
        model.zero_grad()
        loss.backward()
        # Optimize parameters
        model.optimize()
        if batch % 200 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn, lr):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.set_lr(lr)
    model.eval()
    test_loss = 0.0
    correct = 0.0  # plain float accumulator; no gradient is needed here
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            X = torch.flatten(X, 1, 3)  # reshape X from (50, 1, 28, 28) to (50, 28*28)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float32).sum().item()
    test_loss /= num_batches
    correct_rate = correct / size
    print(f"Test Error: \n Accuracy: {(100*correct_rate):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    return correct_rate
epochs = 1
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, 0.1)
    test(test_dataloader, model, loss_fn, 0.1)
print("Done!")
import math

def evaluate(llr):
    # llr is the natural log of the learning rate; lr = exp(llr)
    lr = math.exp(llr)
    print(f"Evaluate for lr = {lr}")
    torch.manual_seed(14)
    model = NeuralNetwork().to(device)
    epochs = 1
    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        train(train_dataloader, model, loss_fn, lr)
        correct = test(test_dataloader, model, loss_fn, lr)
    print(f"Evaluate for lr = {lr} : correct = {correct}")
    return correct
evaluate(0.1)  # note: the argument is llr, so this evaluates lr = exp(0.1) ≈ 1.105
lr = torch.tensor(0.1).requires_grad_(True)  # attempt to track lr with autograd
print(lr)
evaluate(lr)
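# Note: wrapping the learning rate in a requires_grad tensor does not make
# evaluate() differentiable. math.exp() converts the tensor to a plain Python
# float, detaching it from the autograd graph, and test accuracy is piecewise
# constant in lr anyway (its exact derivative is zero almost everywhere).
# Hence the finite-difference gradient estimate below.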
llr = math.log(0.1)
eps = 0.01
g = 1
# while abs(g) > 0.001:
for i in range(5):
    print(f"*** STEP {i} ***")
    # Central-difference estimate of d(accuracy)/d(llr)
    g = (evaluate(llr + eps) - evaluate(llr - eps)) / (2 * eps)
    llr = llr + 8 * g  # gradient ascent on llr (step size 8)
    print("")
lr = math.exp(llr)
print(f"Maximum correct for lr={lr} : correct={evaluate(llr)}")