import torch
T = torch.tensor([[1., 2.], [3., 4.]])   # prefer torch.tensor over the legacy torch.Tensor constructor
print(T)
print(T ** 2)   # element-wise square
v = torch.tensor([ 10., 20., 30.])
M = torch.tensor([[ 0., 0., 3. ], [ 0., 2., 0. ], [ 1., 0., 0. ]])
print(v)
print(M)
print(M.mv(v))   # matrix-vector product M @ v
print(M @ v)     # the same product with the @ operator
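# A few more product variants, not part of the original example, just for comparison
# using the same M and v defined above:
print(M @ M)              # matrix-matrix product, shape (3, 3)
print(v @ v)              # dot product of v with itself, a scalar tensor
print(torch.outer(v, v))  # outer product, shape (3, 3)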
T = torch.empty(2, 4)
T.fill_(0.05)                 # trailing underscore = in-place operation
print(T)
T.add_(2)                     # in-place add
print(T)
T += torch.randn(T.size())    # also modifies T in place
print(T)
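# For contrast, the out-of-place versions (no trailing underscore) return a new
# tensor and leave T unchanged; a small sketch using the T from above:
U = T.add(2)    # new tensor
print(U)
print(T)        # T is untouched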
import numpy as np
v = np.ones(6)
print(v)
T = torch.from_numpy(v)   # T shares its memory with the NumPy array v
print(T)
T1 = torch.randn(3,3)
print(T1)
v1 = T1.numpy()           # v1 shares its memory with the tensor T1
print(v1)
T.add_(1)                 # in-place change to T ...
print(T)
print(v)                  # ... is visible through the NumPy array v as well
np.add(v1, 3, out=v1)     # in-place change on the NumPy side ...
print(v1)
print(T1)                 # ... is visible through the tensor T1 as well
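# If this memory sharing is not wanted, copying breaks the link; a minimal
# sketch using clone() on the array v from above:
T2 = torch.from_numpy(v).clone()   # independent copy
T2.add_(100)
print(T2)
print(v)                           # v is unaffected by the change to T2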
# A simple example:
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)
# Build a computational graph.
y = w * x + b # y = 2 * x + 3
print(y.grad_fn)
y.backward()        # compute gradients of y w.r.t. x, w, b
print(x.grad)       # dy/dx = w = 2
print(w.grad)       # dy/dw = x = 1
print(b.grad)       # dy/db = 1
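# Gradients accumulate across backward() calls, which is why training loops call
# optimizer.zero_grad(); a small sketch of the same effect by hand, reusing x, w, b:
y2 = w * x + b
y2.backward()
print(w.grad)       # now 2.0: the new dy2/dw = 1 was added to the previous 1.0
w.grad.zero_()      # reset the accumulated gradient in place
print(w.grad)       # back to 0.0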
import torch.nn as nn
import torch.nn.functional as F
x = torch.randn(2,3)
print(x)
x = F.relu(x)
print(x)
f = nn.Linear(in_features = 10, out_features = 4)
for n, p in f.named_parameters(): print(n, p.size())
x = torch.empty(350, 10).normal_()
y = f(x)
print(y.size())
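# The same 10 -> 4 mapping can be wrapped together with a non-linearity via
# nn.Sequential; a small sketch reusing the batch x from above:
g = nn.Sequential(nn.Linear(10, 4), nn.ReLU())
print(g(x).size())   # still (350, 4), now with ReLU applied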
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
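# A quick sanity check of the class above on random data; the sizes here are
# hypothetical, chosen only to keep the example small:
net = NeuralNet(input_size=10, hidden_size=8, num_classes=4)
print(net(torch.randn(5, 10)).size())   # expected: torch.Size([5, 4])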
import torchvision
import torchvision.transforms as transforms
input_size = 784
hidden_size = 500
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
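# Peek at one batch to confirm the shapes the training loop below expects:
# images arrive as (batch, 1, 28, 28) and are flattened to (batch, 784) later.
images, labels = next(iter(train_loader))
print(images.size(), labels.size())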
model = NeuralNet(input_size, hidden_size, num_classes)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten the images from (batch, 1, 28, 28) to (batch, 784)
        images = images.reshape(-1, 28*28)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model (gradients are not needed during evaluation)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
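# To restore the weights later, rebuild the model and load the saved state dict;
# a minimal sketch reusing the hyper-parameters defined above:
restored = NeuralNet(input_size, hidden_size, num_classes)
restored.load_state_dict(torch.load('model.ckpt'))
restored.eval()   # switch to evaluation mode before running inference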