Training an iris model with the Sequential API
import tensorflow as tf
from sklearn import datasets
import numpy as np
import os
# Suppress TensorFlow INFO and WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Training-set input features
x_train = datasets.load_iris().data
# Training-set labels
y_train = datasets.load_iris().target
# Shuffle the dataset; reusing the same seed keeps features and labels aligned
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)
# Build the network structure
model = tf.keras.models.Sequential([
    # Dense layer: (number of neurons, activation function, regularization method)
    tf.keras.layers.Dense(3, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())
])
# Configure training: SGD optimizer with learning_rate=0.1
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              # Choose the loss function; the softmax output is already a probability
              # distribution, so from_logits=False
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              # Choose the evaluation metric
              metrics=['sparse_categorical_accuracy'])
# Run training: (input features, labels, batch size, epochs, hold out 20% of the data for validation, validate every 20 epochs)
model.fit(x_train, y_train, batch_size=32, epochs=500, validation_split=0.2, validation_freq=20)
# Print the model structure
model.summary()
Training results:
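The summary reports 15 trainable parameters for the single Dense layer: 4 input features × 3 neurons = 12 weights, plus 3 biases. As a quick sanity check, the trained model can be used for inference right away; the snippet below is a minimal sketch that assumes the listing above has just been run in the same session (the slice of three samples is only illustrative):
# Predict softmax probabilities for a few training samples
probs = model.predict(x_train[:3])
# Take the most likely class for each sample and compare with the labels
pred_classes = np.argmax(probs, axis=1)
print(pred_classes, y_train[:3])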
Training a Fashion-MNIST image-classification model with the Sequential API
import tensorflow as tf
fashion = tf.keras.datasets.fashion_mnist
(x_train, y_train),(x_test, y_test) = fashion.load_data()
# Normalize the pixel values to [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
    # Flatten the 28x28 images into vectors of length 784
    tf.keras.layers.Flatten(),
    # First Dense layer: 128 neurons with ReLU activation
    tf.keras.layers.Dense(128, activation='relu'),
    # Second (output) layer: 10 neurons with softmax activation
    tf.keras.layers.Dense(10, activation='softmax')
])
# Configure training
model.compile(optimizer='adam',
              # Choose the loss function
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy']
              )
# Train with fit
model.fit(x_train, y_train, batch_size=64, epochs=10, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
Output:
Note that the model overfits at this point (training accuracy runs ahead of validation accuracy); a common mitigation is sketched below.
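One common way to reduce this kind of overfitting is to add a Dropout layer and stop training once the validation metric stops improving. The sketch below is illustrative only; the Dropout rate of 0.2 and the EarlyStopping patience of 2 are assumed values, not part of the original example:
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    # Randomly drop 20% of activations during training to regularize the network
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
# Stop once validation accuracy has not improved for 2 consecutive epochs
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_sparse_categorical_accuracy',
                                              patience=2, restore_best_weights=True)
model.fit(x_train, y_train, batch_size=64, epochs=10,
          validation_data=(x_test, y_test), callbacks=[early_stop])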
Training the Fashion-MNIST classifier with a subclassed Model (class style)
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras import Model
fashion = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion.load_data()
# Normalize the pixel values to [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0
class MnistModel(Model):
    def __init__(self):
        super(MnistModel, self).__init__()
        # Define the layers: flatten, hidden Dense layer, softmax output layer
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        # Forward pass: flatten the image, then apply the two Dense layers
        x = self.flatten(x)
        x = self.d1(x)
        y = self.d2(x)
        return y
model = MnistModel()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
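After training, the subclassed model behaves just like a Sequential one for evaluation and inference. The following is a minimal usage sketch that assumes the script above has just been run in the same session:
# Evaluate on the held-out test set
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('test accuracy:', test_acc)
# Predict the class of the first test image (the slice keeps the batch dimension)
probs = model.predict(x_test[:1])
print('predicted class:', int(probs.argmax()), 'true class:', int(y_test[0]))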