import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
# torch.manual_seed(1)

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = False   # set to True on the first run to download MNIST into ./mnist/

train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, transform=torchvision.transforms.ToTensor(), download=DOWNLOAD_MNIST)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)

# !!!!!!!! Change in here !!!!!!!!! #
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000].cuda()/255.   # Tensor on GPU, shape (2000, 1, 28, 28), scaled to [0, 1]
test_y = test_data.test_labels[:2000].cuda()                                                      # Tensor on GPU

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
                                   nn.ReLU(), nn.MaxPool2d(kernel_size=2))
        self.conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2))
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)     # flatten to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output

cnn = CNN()

# !!!!!!!! Change in here !!!!!!!!! #
cnn.cuda()      # moves all model parameters and buffers to the GPU

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):

        # !!!!!!!! Change in here !!!!!!!!! #
        b_x = x.cuda()    # Tensor on GPU
        b_y = y.cuda()    # Tensor on GPU

        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            test_output = cnn(test_x)

            # !!!!!!!! Change in here !!!!!!!!! #
            pred_y = torch.max(test_output, 1)[1].cuda().data     # keep the computation on the GPU

            accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss, '| test accuracy: %.2f' % accuracy)

test_output = cnn(test_x[:10])

# !!!!!!!! Change in here !!!!!!!!! #
pred_y = torch.max(test_output, 1)[1].cuda().data     # keep the computation on the GPU

print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
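The same GPU moves can also be written in the device-agnostic style that newer PyTorch versions encourage. The short sketch below is not part of the original listing; it reuses the trained cnn and test_x from above, replaces the explicit .cuda() calls with .to(device), and uses .cpu() to bring the result back to the CPU for printing or plotting. (Newer torchvision also exposes the raw MNIST tensors as test_data.data and test_data.targets instead of test_data.test_data and test_data.test_labels.)

# --- optional sketch: device-agnostic version of the GPU moves above ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

cnn = cnn.to(device)                      # moves parameters and buffers, same effect as cnn.cuda() when CUDA is available
sample = test_x[:10].to(device)           # put the inputs on the same device as the model
pred_y = torch.max(cnn(sample), 1)[1]     # prediction tensor stays on `device`
print(pred_y.cpu().numpy(), 'prediction number (computed on device, read back with .cpu())')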