我是链接

第一次用pytorch写机器学习,不得不说是真的好用

pytorch的学习可以看这里,看看基本用法就行,个人感觉主要还是要看着实践代码来学习

总结了几个点:

1.loss出现nan

这个让我头疼了好久,主要有两个方面吧:一是学习率可能太高了,可以调低一点试试。二是对于这个数据,黑白值颜色深度是用0~255来表示的,让每个颜色深度除以255变成0~1来表示,结果会好很多,准确率也会高很多。

另外听说训练数据里有nan inf 除以0 也会出现nan

2.训练的时候犯傻了,好几万的数据训练的时候只用了前几百个。我还纳闷为啥准确率那么低(89%左右),后来发现batch挺大,但只有几百个训练了。

把上面说的改了后正确率到达98.2%,后面慢慢来改进

神经网络的模型是看着网上的搭建的,2个卷积层和池化层,2个全连接层

import torch
import torch.nn as nn
import pandas as pd
import numpy as np


class WYJ_CNN(nn.Module):
    """Small CNN for 28x28 single-channel digit images (MNIST-style).

    Two conv+pool stages followed by a two-layer classifier head.
    Returns raw logits for 10 classes; softmax is left to the loss.
    """

    def __init__(self):
        super(WYJ_CNN, self).__init__()
        # Stage 1: 1x28x28 -> 16x28x28 (3x3 conv, padding=1 keeps size) -> 16x14x14 (2x2 pool)
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        # Stage 2: 16x14x14 -> 32x14x14 (conv) -> 32x7x7 (pool)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, 1)
        self.pool2 = nn.MaxPool2d(2)
        # Head: flatten 32*7*7 = 1568 features -> 16 (ReLU) -> 10 logits
        self.zhankai = nn.Flatten()
        self.lin1 = nn.Linear(32 * 7 * 7, 16)
        self.jihuo = nn.ReLU()
        self.lin2 = nn.Linear(16, 10)

    def forward(self, x):
        """Pass a (N, 1, 28, 28) batch through every stage; return (N, 10) logits."""
        stages = (self.conv1, self.pool1, self.conv2, self.pool2,
                  self.zhankai, self.lin1, self.jihuo, self.lin2)
        for stage in stages:
            x = stage(x)
        return x


# Load the training CSV (Kaggle "Digit Recognizer" layout): column 0 is the
# label, the remaining 784 columns are 28x28 pixel intensities in 0~255.
myTrain = pd.read_csv('train.csv')
vals = myTrain.values  # .values is already an ndarray; the extra np.array() copy was redundant
# CrossEntropyLoss expects integer class indices (int64/Long); coerce explicitly
# instead of relying on the CSV parser's inferred dtype.
labels = torch.from_numpy(vals[:, 0]).long()

net = WYJ_CNN()
CalcLoss = nn.CrossEntropyLoss()  # cross-entropy loss (applies log-softmax internally)
# NOTE(review): lr=0.2 is aggressive for plain SGD and was reported above to
# cause NaN loss; lowering it is the first knob to turn if loss diverges.
optimizer = torch.optim.SGD(net.parameters(), lr=0.2)

batch = 128
for cnt in range(10):  # epochs
    # Floor division deliberately drops the final partial batch (< batch samples).
    for i in range(len(vals) // batch):
        # Scale pixel intensities from 0~255 down to 0~1 — without this the
        # loss was prone to blowing up to NaN (see notes above).
        tmp = vals[i * batch:(i + 1) * batch, 1:] / 255
        # Reshape to (N, channels, height, width); -1 keeps this correct even
        # if the slice is ever shorter than `batch`.
        tmp = tmp.reshape(-1, 1, 28, 28)
        tmp = torch.from_numpy(tmp).float()
        outputs = net(tmp)
        loss = CalcLoss(outputs, labels[i * batch:(i + 1) * batch])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # BUG FIX: the old condition `i * batch % 1000 == 0` almost never fired
        # (128*i is a multiple of 1000 only every 125 steps). Log every 8
        # batches (~1000 samples) and include the running loss.
        if i % 8 == 0:
            print("training", i * batch, "loss", loss.item())

# Saves the whole module (class definition must be importable at load time);
# saving net.state_dict() is the more portable convention.
torch.save(net, "my_cnn.nn")

# net = torch.load("my_cnn.nn")
myTest = pd.read_csv('test.csv')
ImageId = []
Label = []

net.eval()  # no dropout/batchnorm here, but correct inference hygiene
with torch.no_grad():  # inference only: skip building the autograd graph
    for i in range(len(myTest)):
        tmp = np.array(myTest.values[i][:]) / 255  # same 0~1 scaling as training
        tmp = tmp.reshape(1, 1, 28, 28)
        tmp = torch.from_numpy(tmp).float()
        b = net(tmp)

        ImageId.append(i + 1)  # Kaggle submission ids are 1-based
        Label.append(b.argmax().item())

        if i % 1000 == 0:
            print("testing", i)

myAns = pd.DataFrame({'ImageId': ImageId, 'Label': Label})
myAns.to_csv("myAns.csv", index=False, sep=',')

最近在学飞桨,放一个飞桨版本

import paddle
import paddle.nn as nn
import pandas as pd
import numpy as np


class WYJ_CNN(nn.Layer):
    """Small CNN for 28x28 single-channel digit images (MNIST-style), Paddle version.

    Two conv+pool stages followed by a two-layer classifier head.
    Returns raw logits for 10 classes; softmax is left to the loss.
    """

    def __init__(self):
        super(WYJ_CNN, self).__init__()
        # Stage 1: 1x28x28 -> 16x28x28 (3x3 conv, padding=1 keeps size) -> 16x14x14 (2x2 pool)
        self.conv1 = nn.Conv2D(in_channels=1, out_channels=16,
                               kernel_size=(3, 3),
                               stride=(1, 1),
                               padding=1)
        self.pool1 = nn.MaxPool2D(kernel_size=2)
        # Stage 2: 16x14x14 -> 32x14x14 (conv) -> 32x7x7 (pool)
        self.conv2 = nn.Conv2D(16, 32, 3, 1, 1)
        self.pool2 = nn.MaxPool2D(2)
        # Head: flatten 32*7*7 = 1568 features -> 16 (ReLU) -> 10 logits
        self.zhankai = nn.Flatten()
        self.lin1 = nn.Linear(32 * 7 * 7, 16)
        self.jihuo = nn.ReLU()
        self.lin2 = nn.Linear(16, 10)

    def forward(self, x):
        """Pass a (N, 1, 28, 28) batch through every stage; return (N, 10) logits."""
        stages = (self.conv1, self.pool1, self.conv2, self.pool2,
                  self.zhankai, self.lin1, self.jihuo, self.lin2)
        for stage in stages:
            x = stage(x)
        return x


# Load the training CSV (Kaggle "Digit Recognizer" layout): column 0 is the
# label, the remaining 784 columns are 28x28 pixel intensities in 0~255.
myTrain = pd.read_csv('train.csv')
vals = myTrain.values  # .values is already an ndarray; the extra np.array() copy was redundant
# CrossEntropyLoss expects integer class indices.
labels = paddle.to_tensor(vals[:, 0])

net = WYJ_CNN()
CalcLoss = nn.CrossEntropyLoss()  # cross-entropy loss (applies log-softmax internally)
# NOTE(review): learning_rate=0.2 is aggressive for plain SGD and was reported
# above to cause NaN loss; lowering it is the first knob to turn if loss diverges.
optimizer = paddle.optimizer.SGD(learning_rate=0.2, parameters=net.parameters())

batch = 128
for cnt in range(10):  # epochs
    # Floor division deliberately drops the final partial batch (< batch samples).
    for i in range(len(vals) // batch):
        # Scale pixel intensities from 0~255 down to 0~1 to keep the loss stable.
        tmp = vals[i * batch:(i + 1) * batch, 1:] / 255
        # Reshape to (N, channels, height, width); -1 keeps this correct even
        # if the slice is ever shorter than `batch`.
        tmp = tmp.reshape(-1, 1, 28, 28)
        tmp = paddle.to_tensor(tmp).astype('float32')  # documented string dtype form
        outputs = net(tmp)
        loss = CalcLoss(outputs, labels[i * batch:(i + 1) * batch])

        optimizer.clear_grad()
        loss.backward()
        optimizer.step()

        # BUG FIX: the old condition `i * batch % 1000 == 0` almost never fired
        # (128*i is a multiple of 1000 only every 125 steps). Log every 8
        # batches (~1000 samples) and include the running loss.
        if i % 8 == 0:
            print("training", i * batch, "loss", loss.item())
# NOTE(review): unlike the torch version, the trained model is never persisted
# here; add paddle.save(net.state_dict(), ...) if a checkpoint is wanted.
# net = paddle.load("my_cnn.nn")
myTest = pd.read_csv('test.csv')
ImageId = []
Label = []

net.eval()  # no dropout/batchnorm here, but correct inference hygiene
with paddle.no_grad():  # inference only: skip building the autograd graph
    for i in range(len(myTest)):
        tmp = np.array(myTest.values[i][:]) / 255  # same 0~1 scaling as training
        tmp = tmp.reshape(1, 1, 28, 28)
        tmp = paddle.to_tensor(tmp).astype('float32')
        b = net(tmp)

        ImageId.append(i + 1)  # Kaggle submission ids are 1-based
        Label.append(b.argmax().item())

        if i % 1000 == 0:
            print("testing", i)

myAns = pd.DataFrame({'ImageId': ImageId, 'Label': Label})
myAns.to_csv("myAns.csv", index=False, sep=',')