1. Fitting a line by training
import tensorflow as tf
import numpy as np

# Fabricate 100 samples of y = 0.1 * x + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Trainable parameters: the goal is for Weights -> 0.1 and biases -> 0.3.
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))

y = Weights * x_data + biases

# Mean squared error, minimized by plain gradient descent.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()  # replaces the deprecated tf.initialize_all_variables()

sess = tf.Session()
sess.run(init)

for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(Weights), sess.run(biases))
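As a sanity check (this snippet is an addition to the notes, not part of the original), the same line can be recovered in closed form with numpy; the trained Weights and biases should approach these values:

# Least-squares fit of degree 1: returns (slope, intercept).
w, b = np.polyfit(x_data, y_data, 1)
print(w, b)  # ~0.1 and ~0.3, since y_data here is noise-free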
2. Session control
import tensorflow as tf

matrix1 = tf.constant([[3, 3]])    # shape (1, 2)
matrix2 = tf.constant([[2], [2]])  # shape (2, 1)
product = tf.matmul(matrix1, matrix2)  # (1, 1) matrix product

### method 1: create and close the session explicitly
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()

### method 2 (preferred): the context manager closes the session automatically
with tf.Session() as sess:
    result2 = sess.run(product)
    print(result2)  # [[12]]
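For comparison (an added aside, not from the original notes), the same product in plain numpy needs no graph and no session:

import numpy as np
print(np.matmul([[3, 3]], [[2], [2]]))  # [[12]], i.e. 3*2 + 3*2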
3. Variable
import tensorflow as tf

state = tf.Variable(0, name='counter')
print(state.name)  # counter:0

one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)  # running this op increments state

init = tf.global_variables_initializer()  # replaces the deprecated tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))  # prints 1, 2, 3
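A small shortcut (my addition, relying on the fact that running an assign op returns the freshly assigned value): the extra sess.run(state) can be dropped.

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        print(sess.run(update))  # assign returns the new value: 1, 2, 3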
4. placeholder (feeding in values)
import tensorflow as tf

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    # placeholders receive their values through feed_dict at run time
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))  # [14.]
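Since these placeholders are declared without a shape, they also accept plain scalars (an added aside, not in the original notes):

with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1: 7.0, input2: 2.0}))  # 14.0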
5. Adding a network layer: add_layer
import tensorflow as tf

def add_layer(inputs, in_size, out_size, activation_function=None):
    # Weights: in_size x out_size; biases start slightly above zero.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
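A minimal usage sketch (shapes are illustrative; section 6 below does exactly this): one input feature mapped through a 10-unit ReLU hidden layer to a single linear output.

xs = tf.placeholder(tf.float32, [None, 1])
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
out = add_layer(l1, 10, 1, activation_function=None)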
6. Building a neural network
import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# 300 samples of y = x^2 - 0.5 plus Gaussian noise.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# One hidden ReLU layer with 10 units, then a linear output layer.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Squared error summed per sample, averaged over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # loss should fall steadily
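Once trained, the network can be queried on fresh inputs (an added sketch; the query point 0.5 is arbitrary):

print(sess.run(prediction, feed_dict={xs: np.array([[0.5]], dtype=np.float32)}))
# should land near 0.5**2 - 0.5 = -0.25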
7. Visualizing the result
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Scatter the raw data once, then redraw the fitted curve as training proceeds.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode, so plt.show() does not block
plt.show()

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        try:
            ax.lines.remove(lines[0])  # drop the previous curve; fails harmlessly on the first pass
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        plt.pause(0.1)
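One practical addition (not in the original notes): with interactive mode on, the window closes as soon as the script exits; turning it off and calling show() again at the end keeps the final plot on screen.

plt.ioff()  # leave interactive mode
plt.show()  # block until the window is closed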
8. TensorBoard visualization
import tensorflow as tf
import numpy as np

tf.set_random_seed(1)
np.random.seed(1)

# fake data
x = np.linspace(-1, 1, 100)[:, np.newaxis]  # shape (100, 1)
noise = np.random.normal(0, 0.1, size=x.shape)
y = np.power(x, 2) + noise  # shape (100, 1) + some noise

with tf.variable_scope('Inputs'):
    tf_x = tf.placeholder(tf.float32, x.shape, name='x')
    tf_y = tf.placeholder(tf.float32, y.shape, name='y')

with tf.variable_scope('Net'):
    l1 = tf.layers.dense(tf_x, 10, tf.nn.relu, name='hidden_layer')
    output = tf.layers.dense(l1, 1, name='output_layer')

    # add to histogram summary
    tf.summary.histogram('h_out', l1)
    tf.summary.histogram('pred', output)

loss = tf.losses.mean_squared_error(tf_y, output, scope='loss')
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss)
tf.summary.scalar('loss', loss)  # add loss to scalar summary

sess = tf.Session()
sess.run(tf.global_variables_initializer())

writer = tf.summary.FileWriter('D:/learning/code/python/logs/', sess.graph)  # write to file
merge_op = tf.summary.merge_all()  # operation to merge all summary

for step in range(100):
    # train and net output
    _, result = sess.run([train_op, merge_op], {tf_x: x, tf_y: y})
    writer.add_summary(result, step)
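One detail worth adding (my addition, not from the original notes): close the writer when training finishes so any buffered events are flushed to disk before TensorBoard reads them.

writer.close()  # flush pending summaries to the logs/ directory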
Next, change into the directory one level above logs/ and launch TensorBoard:
cd D:/learning/code/python/
tensorboard --logdir=logs/
Then open http://localhost:6006 in a browser and switch to the Graphs tab to inspect the computation graph.