| """ # @Time : 2020/10/20 # @Author : Jimou Chen """ import torch from torch import nn, optim from torch.autograd import Variable from torchvision import datasets, transforms from torch.utils.data import DataLoader
class Net(nn.Module):
    """Single-layer softmax classifier for flattened 28x28 MNIST images."""

    def __init__(self):
        super(Net, self).__init__()
        # 784 = 28 * 28 input pixels, mapped straight to 10 class scores.
        self.fc1 = nn.Linear(784, 10)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Flatten (batch, 1, 28, 28) -> (batch, 784), score, then normalize
        # to a per-sample probability distribution over the 10 classes.
        flat = x.view(x.size()[0], -1)
        scores = self.fc1(flat)
        return self.softmax(scores)
if __name__ == '__main__':
    # MNIST train/test splits as [0, 1] tensors; downloaded on first run.
    train_data = datasets.MNIST(root='./', train=True, transform=transforms.ToTensor(), download=True)
    test_data = datasets.MNIST(root='./', train=False, transform=transforms.ToTensor(), download=True)

    batch_size = 64
    train_load = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    test_load = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)

    model = Net()
    # NOTE(review): MSE over softmax probabilities does train, but
    # nn.CrossEntropyLoss on raw logits is the conventional (and better
    # conditioned) choice for classification — left unchanged to preserve
    # the script's behavior.
    mse_loss = nn.MSELoss()
    opt = optim.SGD(model.parameters(), lr=0.5)

    def train():
        """Run one epoch of SGD over the training loader."""
        model.train()
        for input_data, labels in train_load:
            out = model(input_data)
            # One-hot encode integer labels so targets match the
            # (batch, 10) model output expected by MSELoss.
            one_hot = torch.zeros(input_data.shape[0], 10).scatter(1, labels.reshape(-1, 1), 1)
            loss = mse_loss(out, one_hot)
            opt.zero_grad()
            loss.backward()
            opt.step()

    def test():
        """Evaluate accuracy over the full test loader and print it."""
        model.eval()
        correct = 0
        # no_grad: evaluation needs no autograd graph (saves memory/time).
        with torch.no_grad():
            for input_data, labels in test_load:
                out = model(input_data)
                _, pred_index = torch.max(out, 1)
                correct += (pred_index == labels).sum()
        print('准确率:{0}'.format(correct.item() / len(test_data)))

    # 10 epochs: train, then report test accuracy after each.
    for i in range(10):
        print(i, ':', end='')
        train()
        test()