Mathematical model

Log-linear regression:

ln y = wx + b,  i.e.  y = e^(wx + b)

The right-hand side is still the linear model wx + b. Generalizing ln to any monotone, differentiable function g(·):

g(y) = wx + b
y = h(wx + b) = g^(-1)(wx + b),  where g(·) is the link function
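
As a quick illustration of the link-function idea (a toy sketch, not part of the original program; the data below is made up), one can fit the linear model on the transformed target g(y) = ln y and map predictions back through the inverse link g^(-1) = exp:

# Sketch: log-linear regression via the link g = ln, inverse link g^(-1) = exp (toy data).
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.exp(0.5 * x + 1.0)               # toy targets generated from y = e^(0.5x + 1)

w, b = np.polyfit(x, np.log(y), 1)      # fit a straight line to the linked target ln y
y_hat = np.exp(w * x + b)               # map back through the inverse link

print(round(w, 3), round(b, 3))         # recovers roughly w = 0.5, b = 1.0
print(np.allclose(y_hat, y))            # True on this noiseless toy data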


High-dimensional model

y = g^(-1)(W^T X)
【 where W = (w0, w1, …, wm)^T, X = (x0, x1, …, xm)^T, and x0 = 1 】

Sigmoid function

Taking the inverse link g^(-1) to be the sigmoid (logistic) function, with z = wx + b:

y = g^(-1)(z) = 1/(1 + e^(-z)) = 1/(1 + e^(-(wx + b)))

Multivariate sigmoid model

y = 1/(1 + e^(-W^T X))
【 where W = (w0, w1, …, wm)^T, X = (x0, x1, …, xm)^T, and x0 = 1 】
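
A short sketch of the multivariate sigmoid model (the weights and feature values below are hypothetical, chosen only to show the shapes involved): augment x with the bias input x0 = 1, compute W^T X, and squash it into (0, 1).

# Sketch: y = 1/(1 + e^(-W^T X)) with an augmented feature vector (illustrative values only).
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

W = np.array([0.2, -1.0, 0.8])     # (w0, w1, w2)^T, hypothetical weights
X = np.array([1.0, 0.5, 1.5])      # (x0, x1, x2)^T with the bias input x0 = 1
z = W @ X                          # W^T X = w0 + w1*x1 + w2*x2
print(sigmoid(z))                  # a value in (0, 1), read as P(y = 1 | x)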

Code implementation

# Multivariate logistic regression is a linear classifier
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib as mpl
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

# The training CSV is assumed to be already downloaded locally; it could also be
# fetched with tf.keras.utils.get_file, like the test set below.
filePath = "C:/Users/gx/.keras/datasets/iris_training.csv"
test_url = 'http://download.tensorflow.org/data/iris_test.csv'
test_path = tf.keras.utils.get_file(test_url.split('/')[-1], test_url)

df_iris_train = pd.read_csv(filePath, header=0)
df_iris_test = pd.read_csv(test_path, header=0)

# print(df_iris_train)

# Convert the DataFrames to NumPy arrays
iris_train = np.array(df_iris_train)
iris_test = np.array(df_iris_test)
# print(iris_train.shape) => (120,5)
# print(iris_test.shape) => (30, 5)

# Extract features and labels (split the data into x and y)
train_x = iris_train[:, 0:2] # shape = (120, 2); only the first two features are used
train_y = iris_train[:, 4] # shape = (120,); column index 4 is the species label

test_x = iris_test[:, 0:2]
test_y = iris_test[:, 4]

# Keep only the first two species (labels 0 and 1) as the training set
x_train = train_x[train_y < 2] # shape=(78,2)
y_train = train_y[train_y < 2] # shape=(78, )

x_test = test_x[test_y < 2]
y_test = test_y[test_y < 2]
# print(x_train)
num_train = len(x_train) # 78
num_test = len(x_test) # 22

# Visualize the samples
cm_pt = mpl.colors.ListedColormap(['red', 'blue'])
# plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=cm_pt)
# plt.show()

# Center the features column-wise (axis=0)
x_train = x_train - np.mean(x_train, axis=0)
x_test = x_test - np.mean(x_test, axis=0)

# Generate the bias column x0 = 1 (a column of ones)
x0_train = np.ones(num_train).reshape(-1,1)
x0_test = np.ones(num_test).reshape(-1,1)

# Build the feature matrices (prepend the bias column) and the label column vectors
X_train = tf.cast(tf.concat((x0_train, x_train), 1), tf.float32)
Y_train = tf.cast(y_train.reshape(-1,1), tf.float32)

X_test = tf.cast(tf.concat((x0_test, x_test),1), tf.float32)
Y_test = tf.cast(y_test.reshape(-1,1),tf.float32)
# X_train.shape=TensorShape([78,3])
# Y_train.shape=TensorShape([78,1])
# X_test.shape =TensorShape([22,3])
# Y_test.shape =TensorShape([22,1])

# Hyperparameter settings
learn_rate = 0.02
iter = 1000
display_step = 100

# Initialize the model parameters
np.random.seed(612)
W = tf.Variable(np.random.randn(3,1),dtype=tf.float32)
# print(W.shape) (3,1)

# Plot the initial decision boundary (with the randomly initialized W)
x_ = [-1.5,1.5]
y_ = -(W[1]*x_ + W[0])/W[2]

plt.scatter(x_train[:,0], x_train[:,1], c=y_train, cmap=cm_pt)
plt.plot(x_, y_, color='red', linewidth=1.5)
plt.xlim([-1.5, 1.5])
plt.ylim([-1.5, 1.5])

# Containers for the training history
cross_train = [] # cross-entropy loss on the training set
acc_train = [] # accuracy on the training set
cross_test = []
acc_test = []

for i in range(0, iter+1):
    with tf.GradientTape() as tape:
        pred_train = 1/(1+tf.exp((-1)*tf.matmul(X_train, W)))
        Loss_train = -tf.reduce_mean(Y_train*tf.math.log(pred_train) + (1-Y_train)*tf.math.log(1-pred_train))
        pred_test = 1/(1+tf.exp((-1)*tf.matmul(X_test, W)))
        Loss_test = -tf.reduce_mean(Y_test*tf.math.log(pred_test)+(1-Y_test)*tf.math.log(1-pred_test))

    accuracy_train = tf.reduce_mean(tf.cast(tf.equal(tf.where(pred_train.numpy()<0.5, 0.0, 1.0), Y_train), tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(tf.equal(tf.where(pred_test.numpy()<0.5, 0.0, 1.0), Y_test), tf.float32))

    cross_train.append(Loss_train)
    cross_test.append(Loss_test)
    acc_train.append(accuracy_train)
    acc_test.append(accuracy_test)


    # Update the model parameters by gradient descent
    dl_dw = tape.gradient(Loss_train, W)
    W.assign_sub(learn_rate * dl_dw)

    if i % display_step == 0:
        print('i:%i, Loss_train:%f, accuracy_train: %f'%(i ,Loss_train, accuracy_train))
        print('i:%i, \t\t\t\t\t\t Loss_test:%f, accuracy_test: %f'%(i ,Loss_test, accuracy_test))
        y_ = -(W[1]*x_ + W[0])/W[2]
        plt.plot(x_, y_)

plt.figure()
plt.plot(cross_train, color='blue', label='loss')
plt.plot(acc_train, color='green', label='accuracy')
plt.legend()

# Decision boundary: w1*x1 + w2*x2 + w0 = 0  =>  x2 = -(w1*x1 + w0)/w2
# plt.show()

plt.figure()
plt.plot(cross_train, color='blue', label='train loss')
plt.plot(cross_test, color='green', label='test loss')
plt.ylabel('Loss')
plt.legend()

plt.figure()
plt.plot(acc_train, color='blue', label='train accuracy')
plt.plot(acc_test, color='green', label='test accuracy')
plt.ylabel("accuracy")
plt.legend()
plt.show()

print(W)
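
The trained W can then be used to score a new sample. A minimal sketch, appended after the script above (the feature values are hypothetical, and the new sample must be centered with the mean of the original, uncentered training features):

# Sketch: classify a new sample with the trained W (hypothetical feature values).
x_new = np.array([[5.1, 3.5]]) - np.mean(train_x[train_y < 2], axis=0)  # center like x_train
X_new = tf.cast(np.hstack((np.ones((1, 1)), x_new)), tf.float32)        # prepend x0 = 1
p_new = 1 / (1 + tf.exp(-tf.matmul(X_new, W)))                          # sigmoid(W^T X)
print(int(p_new.numpy()[0, 0] > 0.5))                                   # predicted class: 0 or 1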

Plot results:

Training-set scatter plot with the decision boundaries, plus the loss and accuracy curves (training set only)
Loss and accuracy curves comparing the training and test sets

Numerical output: