Trainable variables: tf.Variable

import tensorflow as tf
import os
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
# Trainable variables: tf.Variable
print(tf.Variable(3))
# Output: <tf.Variable 'Variable:0' shape=() dtype=int32, numpy=3>
print(tf.Variable([1,3]))
# Output: <tf.Variable 'Variable:0' shape=(2,) dtype=int32, numpy=array([1, 3], dtype=int32)>
print(tf.Variable(np.array([1,2,3]), dtype=tf.float32))
# Output: <tf.Variable 'Variable:0' shape=(3,) dtype=float32, numpy=array([1., 2., 3.], dtype=float32)>
# Assigning to a trainable variable: x.assign(), x.assign_add(), x.assign_sub()
assign = tf.Variable(tf.constant([1,2]))
assign.assign([3,3])
print("x.assign([3,3])= ", assign)
assign.assign_add([1,1])
print("x.assign_add([1,1])= ", assign)
assign.assign_sub([2,2])
print("x.assign_sub([2,2])= ", assign)
# Check the object's type
print('assign is Variable: ', isinstance(assign, tf.Variable))
print('assign is Tensor: ', isinstance(assign, tf.Tensor))
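# Besides the type checks above, a Variable carries metadata that is handy
# for debugging. A minimal sketch (values in the comments assume TF 2.x
# eager defaults):
v = tf.Variable([1.0, 2.0], name="weights")
print(v.name)       # weights:0
print(v.trainable)  # True by default
print(v.dtype)      # <dtype: 'float32'>, inferred from the initial value
print(v.shape)      # (2,)
print(v.numpy())    # [1. 2.]
frozen = tf.Variable(3.0, trainable=False)  # excluded from training updates
print(frozen.trainable)  # False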
# TensorFlow automatic differentiation: tf.GradientTape. It acts as a context
# manager that watches the variables and computations inside the with block
# and records them on the tape:
# with tf.GradientTape() as tape:
#     function expression
# grad = tape.gradient(function, variable)
# First-order derivatives
x = tf.Variable(2.0)
# persistent=True keeps the tape alive for multiple gradient calls;
# release it manually with del tape when finished.
# watch_accessed_variables=True (the default) watches every accessed variable;
# with False, variables must be added explicitly via tape.watch(x).
with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
    tape.watch(x)
    y = x*x*x + x*x + 9*x  # f(x) = x^3 + x^2 + 9x
    z = 5*x - x*x          # g(x) = 5x - x^2
dy_dx = tape.gradient(y, x)
dz_dx = tape.gradient(z, x)
print(y)      # 30.0: f(2) = 8 + 4 + 18
print(dy_dx)  # 25.0: f'(x) = 3x^2 + 2x + 9 evaluated at x = 2
print(z)      # 6.0: g(2) = 10 - 4
print(dz_dx)  # 1.0: g'(x) = 5 - 2x evaluated at x = 2
del tape
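# The watch mechanism matters beyond this example: a tf.Variable is watched
# automatically (when watch_accessed_variables=True), but a plain tf.constant
# never is. A small sketch:
c = tf.constant(2.0)
with tf.GradientTape() as tape:
    y_c = c * c
print(tape.gradient(y_c, c))  # None: the constant was never watched
with tf.GradientTape() as tape:
    tape.watch(c)  # watch the constant explicitly
    y_c = c * c
print(tape.gradient(y_c, c))  # tf.Tensor(4.0, shape=(), dtype=float32)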
# Second-order derivatives
x2 = tf.Variable(2.0)
y2 = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as tape2:
    with tf.GradientTape(persistent=True) as tape1:
        f = x2*x2 + y2*y2*2 + 1  # f(x,y) = x^2 + 2y^2 + 1
    first_grad = tape1.gradient(f, [x2, y2])
second_grad = tape2.gradient(first_grad, [x2, y2])
print("f:", f)
print("first_grad:", first_grad)
print("second_grad:", second_grad)
del tape1, tape2
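# Note: when gradient() is given a list of targets, it differentiates their
# sum. Here f(x,y) = x^2 + 2y^2 + 1, so first_grad = [2x, 4y] = [4.0, 12.0];
# the cross-partials vanish, so second_grad = [2.0, 4.0] is exactly
# [d2f/dx2, d2f/dy2].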

The output is as follows.

Univariate linear regression in TensorFlow

import tensorflow as tf
import numpy as np

# create data
x = np.array([137.97,104.50,100.00,142.32,79.20,99.00,124.00,114.00,106.96,139.02,53.75,46.91,68.00,63.02,81.26,86.21, 100.00])
y = np.array([145.00,110.00,93.00,116.00,65.32,104.00,118.00,91.00,62.00,133.00,51.00,45.00,78.50,78.50,69.65,75.69,95.30])

# Hyperparameters
learn_rate = 0.0001
num_iter = 10
display_step = 1

# Initialize the model parameters
np.random.seed(111)
w = tf.Variable(np.random.randn())
b = tf.Variable(np.random.randn())

# Train the model
mse = []
for i in range(0, num_iter+1):
    with tf.GradientTape() as tape:
        pred = w*x + b
        Loss = 0.5*tf.reduce_mean(tf.square(y - pred))
    mse.append(Loss)
    # Gradient-descent update: w <- w - lr * dL/dw, b <- b - lr * dL/db
    dl_dw, dl_db = tape.gradient(Loss, [w, b])
    w.assign_sub(learn_rate*dl_dw)
    b.assign_sub(learn_rate*dl_db)
    if i % display_step == 0:
        print("i:%i, Loss:%f, w:%f, b:%f" % (i, Loss, w.numpy(), b.numpy()))

The output is as follows.
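
To see the result, the fitted line and the loss curve can be plotted after the training loop above; a minimal sketch (it reuses x, y, w, b, and mse from the script, and the figure layout is just one choice):

import matplotlib.pyplot as plt

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.scatter(x, y, color='red', label='data')
plt.plot(x, (w*x + b).numpy(), color='blue', label='model')
plt.xlabel('area')
plt.ylabel('price')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot([m.numpy() for m in mse])
plt.xlabel('iteration')
plt.ylabel('loss')
plt.show()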

Multivariate linear regression

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = 'SimHei'  # SimHei font so CJK plot labels render

area = np.array([137.97,104.50,100.00,142.32,79.20,99.00,124.00,114.00,106.96,139.02,53.75,46.91,68.00,63.02,81.26,86.21, 100.00])
room = np.array([3,2,2,3,1,2,3,2,2,3,1,1,1,1,2,2,2])
price = np.array([145.00,110.00,93.00,116.00,65.32,104.00,118.00,91.00,62.00,133.00,51.00,45.00,78.50,78.50,69.65,75.69,95.30])

num = len(area)

# Prepare the data: min-max normalize the features and add a bias column
x0 = np.ones(num)
x1 = (area - area.min())/(area.max() - area.min())
x2 = (room - room.min())/(room.max() - room.min())
X = np.stack((x0, x1, x2), axis=1)  # shape (17, 3)
Y = price.reshape(-1, 1)            # shape (17, 1)

# Hyperparameters
learn_rate = 0.1
num_iter = 500
display_step = 10

# Initialize the model parameters
np.random.seed(1111)
W = tf.Variable(np.random.randn(3,1))
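# Note: np.random.randn(3, 1) returns float64, so W is a float64 variable and
# matches the float64 NumPy features; mixing float32 and float64 would make
# tf.matmul below raise a dtype error.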

# Train the model
mse = []
for i in range(0, num_iter+1):
    # Equivalent pure-NumPy version (note: this gradient omits the 1/m factor
    # that reduce_mean introduces below, so it is scaled differently):
    # pred = np.matmul(X, W)
    # Loss = 0.5 * np.mean(np.square(Y - pred))
    # mse.append(Loss)
    # dl_dw = np.matmul(np.transpose(X), np.matmul(X, W) - Y)
    # W = W - learn_rate*dl_dw
    with tf.GradientTape() as tape:
        pred = tf.matmul(X, W)
        Loss = 0.5*tf.reduce_mean(tf.square(Y - pred))
    mse.append(Loss)
    dl_dw = tape.gradient(Loss, W)
    W.assign_sub(learn_rate*dl_dw)

    if i % display_step == 0:
        print("i:%i, Loss:%f" % (i, mse[i]))

The output is as follows.
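
With the trained W, a new sample is predicted by applying the same preprocessing as the training data: normalize the features with the training min/max and prepend the bias term. A minimal sketch, run after the loop above, using a made-up house (120 square meters and 3 rooms are illustrative values only):

# Hypothetical new house: 120 m^2 with 3 rooms
new_area, new_room = 120.0, 3.0
x_new = np.array([[1.0,
                   (new_area - area.min())/(area.max() - area.min()),
                   (new_room - room.min())/(room.max() - room.min())]])
pred_new = tf.matmul(x_new, W)
print("predicted price:", pred_new.numpy()[0, 0])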

