Introduction to Multilayer Perceptrons

A multilayer perceptron (MLP, Multilayer Perceptron) is a feedforward artificial neural network model. It is very similar to the multiclass logistic regression discussed above; the main difference is that one or more hidden layers are inserted between the input layer and the output layer.
In the figure below, the yellow dots form the input layer, the middle nodes form the hidden layer, and the green dots form the output layer:

Here it is worth pausing on a question: why use an activation function at all? If we apply only linear operations without one, then in the figure above ŷ = X · W1 · W2 = X · W3, where W3 = W1 · W2. The two linear layers collapse into a single linear transformation, exactly equivalent to a network with no hidden layer at all. This generalizes: even with a hundred hidden layers, a purely linear network is still equivalent to a single linear layer. That is why we insert a nonlinear activation function between layers.
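This collapse is easy to verify numerically. Below is a minimal sketch using the same NDArray API as the code that follows; the shapes here are made up purely for illustration:

from mxnet import ndarray as nd

X = nd.random_normal(shape=(4, 5))
W1 = nd.random_normal(shape=(5, 3))
W2 = nd.random_normal(shape=(3, 2))

# Two stacked linear layers without an activation ...
y_two_layers = nd.dot(nd.dot(X, W1), W2)

# ... equal a single linear layer with W3 = W1 · W2.
W3 = nd.dot(W1, W2)
y_one_layer = nd.dot(X, W3)

# The difference is zero up to floating-point error.
print(nd.sum(nd.abs(y_two_layers - y_one_layer)).asscalar())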

Implementing a Multilayer Perceptron from Scratch

Code

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: yuquanle
# 2017/10/14
# Multilayer perceptron classification, following Mu Li's MXNet tutorial
# This example classifies an MNIST-like dataset: MNIST contains digits, while this dataset (Fashion-MNIST) contains clothing items

from mxnet import ndarray as nd
import utils
batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)


num_inputs = 28*28
num_outputs = 10

num_hidden = 256
weight_scale = 0.01
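# Small random initialization breaks the symmetry between hidden units; all-zero weights would give every hidden unit identical gradients.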

W1 = nd.random_normal(shape=(num_inputs, num_hidden), scale=weight_scale)
b1 = nd.zeros(num_hidden)
W2 = nd.random_normal(shape=(num_hidden, num_outputs), scale=weight_scale)
b2 = nd.zeros(num_outputs)

params = [W1, b1, W2, b2]
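# Allocate gradient buffers so autograd.record() / backward() can populate param.grad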
for param in params:
    param.attach_grad()

# Define the activation function
def relu(X):
    return nd.maximum(X, 0)
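# e.g. relu(nd.array([-3., 0., 2.])) -> [0., 0., 2.]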

# Define the model
def net(X):
    # Reshape the input to (?, num_inputs), where ? is the number of examples in the batch
    X = X.reshape((-1, num_inputs))
    # h1 is the output of the hidden layer
    h1 = relu(nd.dot(X, W1) + b1)
    # A fully connected layer maps the hidden output to the output layer
    output = nd.dot(h1, W2) + b2
    return output
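# Illustrative shape check: a batch of four 1x28x28 images yields four 10-dim score vectors,
# i.e. net(nd.random_normal(shape=(4, 1, 28, 28))).shape == (4, 10)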

# Loss: SoftmaxCrossEntropyLoss fuses softmax and cross-entropy for numerical stability
from mxnet import gluon
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()


# Training loop
from mxnet import autograd
learning_rate = 0.5

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
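        # loss.backward() sums gradients over the batch, so divide the learning rate by batch_size to take an average step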
        utils.SGD(params, learning_rate / batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += utils.accuracy(output, label)

    test_acc = utils.evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data),
        train_acc / len(train_data), test_acc))

Results:
Epoch 0. Loss: 0.791282, Train acc 0.745268, Test acc 0.802637
Epoch 1. Loss: 0.575680, Train acc 0.808965, Test acc 0.820605
Epoch 2. Loss: 0.530466, Train acc 0.823908, Test acc 0.830273
Epoch 3. Loss: 0.505710, Train acc 0.830430, Test acc 0.836816
Epoch 4. Loss: 0.490304, Train acc 0.834707, Test acc 0.836816
true labels
['t-shirt', 'trouser', 'pullover', 'pullover', 'dress', 'pullover', 'bag', 'shirt', 'sandal']
predicted labels
['t-shirt', 'trouser', 'pullover', 'shirt', 'coat', 'shirt', 'bag', 'shirt', 'sandal']


Multilayer Perceptron with Gluon

Code:
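With Gluon we no longer define the parameters, the ReLU activation, or the SGD update by hand: nn.Sequential with two Dense layers defines the model, and gluon.Trainer performs the parameter updates. The following is a minimal sketch of the Gluon version, assuming the same utils helpers as in the from-scratch code above:

from mxnet import ndarray as nd
from mxnet import gluon
from mxnet import autograd
import utils

batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)

# Define the model: flatten the image, a 256-unit ReLU hidden layer, a 10-way output layer
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(256, activation="relu"))
    net.add(gluon.nn.Dense(10))
net.initialize()

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# Trainer replaces the hand-written SGD update; step(batch_size) averages the summed gradients
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        trainer.step(batch_size)

        train_loss += nd.mean(loss).asscalar()
        train_acc += utils.accuracy(output, label)

    test_acc = utils.evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data),
        train_acc / len(train_data), test_acc))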
