Building a fully connected network in PyTorch to classify the MNIST dataset

Import packages

import torch 
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

Define parameters

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
input_size = 784
hidden_size = 500
num_classes = 10
num_epochs = 10
batch_size = 100
learning_rate = 0.001
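A note on the hyperparameters: input_size = 784 because each 28×28 MNIST image is flattened into a 784-dimensional vector before being fed to the network, and num_classes = 10 corresponds to the digits 0-9.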

Load the MNIST dataset and define the data loaders

# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='data/mnist/',
                                            train=True,
                                            transform=transforms.ToTensor(),
                                            download=True)  # set download=False if the data is already in data/mnist/

test_dataset = torchvision.datasets.MNIST(root='data/mnist/',
                                           train=False,
                                           transform=transforms.ToTensor())

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
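To sanity-check the data pipeline (and put the matplotlib import to use), you can pull one batch from train_loader and display a sample; this is an optional check rather than part of the original code:

# Optional: inspect one batch to verify shapes and labels
examples = iter(train_loader)
images, labels = next(examples)
print(images.shape)   # torch.Size([100, 1, 28, 28])
print(labels.shape)   # torch.Size([100])

plt.imshow(images[0][0], cmap='gray')
plt.title('label: {}'.format(labels[0].item()))
plt.show()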

Define the network

# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes).to(device)
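As a quick sanity check (not in the original post), you can pass a small random batch through the model and confirm that the output has shape (batch_size, num_classes):

# Feed a dummy batch through the network to verify the output shape
dummy = torch.randn(4, input_size).to(device)
print(model(dummy).shape)   # torch.Size([4, 10])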

Define the loss function and optimizer

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
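Note that nn.CrossEntropyLoss applies LogSoftmax and NLLLoss internally, which is why forward returns raw logits and the network does not need a softmax layer at the output.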

Train the model

total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
Epoch [1/10], Step [100/600], Loss: 0.3704
Epoch [1/10], Step [200/600], Loss: 0.2298
Epoch [1/10], Step [300/600], Loss: 0.2699
Epoch [1/10], Step [400/600], Loss: 0.2355
Epoch [1/10], Step [500/600], Loss: 0.2695
Epoch [1/10], Step [600/600], Loss: 0.1755
Epoch [2/10], Step [100/600], Loss: 0.1354
Epoch [2/10], Step [200/600], Loss: 0.0762
Epoch [2/10], Step [300/600], Loss: 0.0893
Epoch [2/10], Step [400/600], Loss: 0.1229
Epoch [2/10], Step [500/600], Loss: 0.0545
Epoch [2/10], Step [600/600], Loss: 0.0268
Epoch [3/10], Step [100/600], Loss: 0.0358
Epoch [3/10], Step [200/600], Loss: 0.0872
Epoch [3/10], Step [300/600], Loss: 0.0946
Epoch [3/10], Step [400/600], Loss: 0.0441
Epoch [3/10], Step [500/600], Loss: 0.1179
Epoch [3/10], Step [600/600], Loss: 0.0320
Epoch [4/10], Step [100/600], Loss: 0.0273
Epoch [4/10], Step [200/600], Loss: 0.0865
Epoch [4/10], Step [300/600], Loss: 0.0621
Epoch [4/10], Step [400/600], Loss: 0.0578
Epoch [4/10], Step [500/600], Loss: 0.0433
Epoch [4/10], Step [600/600], Loss: 0.0991
Epoch [5/10], Step [100/600], Loss: 0.0414
Epoch [5/10], Step [200/600], Loss: 0.0539
Epoch [5/10], Step [300/600], Loss: 0.0586
Epoch [5/10], Step [400/600], Loss: 0.0080
Epoch [5/10], Step [500/600], Loss: 0.0269
Epoch [5/10], Step [600/600], Loss: 0.0598
Epoch [6/10], Step [100/600], Loss: 0.0172
Epoch [6/10], Step [200/600], Loss: 0.0168
Epoch [6/10], Step [300/600], Loss: 0.0583
Epoch [6/10], Step [400/600], Loss: 0.0109
Epoch [6/10], Step [500/600], Loss: 0.0197
Epoch [6/10], Step [600/600], Loss: 0.0407
Epoch [7/10], Step [100/600], Loss: 0.0273
Epoch [7/10], Step [200/600], Loss: 0.0346
Epoch [7/10], Step [300/600], Loss: 0.0148
Epoch [7/10], Step [400/600], Loss: 0.0235
Epoch [7/10], Step [500/600], Loss: 0.0157
Epoch [7/10], Step [600/600], Loss: 0.0589
Epoch [8/10], Step [100/600], Loss: 0.0116
Epoch [8/10], Step [200/600], Loss: 0.0071
Epoch [8/10], Step [300/600], Loss: 0.0067
Epoch [8/10], Step [400/600], Loss: 0.0111
Epoch [8/10], Step [500/600], Loss: 0.0081
Epoch [8/10], Step [600/600], Loss: 0.0023
Epoch [9/10], Step [100/600], Loss: 0.0062
Epoch [9/10], Step [200/600], Loss: 0.0041
Epoch [9/10], Step [300/600], Loss: 0.0011
Epoch [9/10], Step [400/600], Loss: 0.0011
Epoch [9/10], Step [500/600], Loss: 0.0050
Epoch [9/10], Step [600/600], Loss: 0.0390
Epoch [10/10], Step [100/600], Loss: 0.0018
Epoch [10/10], Step [200/600], Loss: 0.0152
Epoch [10/10], Step [300/600], Loss: 0.0134
Epoch [10/10], Step [400/600], Loss: 0.0181
Epoch [10/10], Step [500/600], Loss: 0.0428
Epoch [10/10], Step [600/600], Loss: 0.0164

Test the model

# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: {} %'
          .format(100 * correct / total))

# Save the model checkpoint
# torch.save(model.state_dict(), 'model.ckpt')
Accuracy of the network on the 10000 test images: 97.72 %
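If you uncomment the save line above, the trained weights can later be restored into a freshly built model with load_state_dict; a minimal sketch, assuming the checkpoint was written to 'model.ckpt':

# Rebuild the network and load the saved weights for inference
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
model.load_state_dict(torch.load('model.ckpt'))
model.eval()   # switch to evaluation mode before running predictions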