TensorFlow Study Notes (5): Building the Network Structure

Copyright notice: this blog is for reference only. If you have comments, please leave them below, and please include the link when reposting. Thank you! https://blog.csdn.net/u010105243/article/details/76683964

1. Building the network structure

The script below fits the curve y = x^2 - 0.5 (plus Gaussian noise) with a single hidden layer of 10 ReLU units and a linear output, trained by full-batch gradient descent on a mean-squared-error loss.

# -*- coding: utf-8 -*-
# @Time    : 17-8-4 上午10:57
# @Author  : 未来战士biubiu!!
# @FileName: 5-buildstructure.py
# @Software: PyCharm Community Edition
# @Blog    :http://blog.csdn.net/u010105243/article/
from __future__ import print_function
import tensorflow as tf
import numpy as np

# Define a fully connected layer
def layer(inputs, in_size, out_size, activation_function=None):
    # Layer weights: random initial values work better than constants;
    # a uniform distribution is used here, but other initializers work too.
    # [in_size, out_size] is the shape of this layer's weight matrix.
    W = tf.Variable(tf.random_uniform([in_size, out_size]))
    b = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    y_ = tf.matmul(inputs, W) + b  # xW + b
    if activation_function is None:
        outputs = y_
    else:
        outputs = activation_function(y_)
    return outputs

# Generate the data: 300 samples of y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for the inputs and targets
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
l1 = layer(xs, 1, 10, activation_function=tf.nn.relu)  # hidden layer: 10 ReLU units
y_pre = layer(l1, 10, 1, activation_function=None)     # linear output layer

# Mean-squared-error loss
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - y_pre), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Initialize the variables and start the session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 20 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
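As a quick sanity check (my addition, not part of the original post), the trained graph can be evaluated on fresh inputs; only xs needs to be fed, since the prediction does not depend on ys. The name x_test is hypothetical.

# Minimal sketch: evaluate the trained network on a fresh input grid
x_test = np.linspace(-1, 1, 50)[:, np.newaxis]
y_test = sess.run(y_pre, feed_dict={xs: x_test})
print(y_test[:5])  # should be close to x_test[:5]**2 - 0.5 after training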

2. Visualizing the data

This is the same script extended with matplotlib: every 20 training steps, the current prediction curve is redrawn in red over a scatter plot of the training data.

# -*- coding: utf-8 -*-
# @Time    : 17-8-4 上午10:57
# @Author  : 未来战士biubiu!!
# @FileName: 5-buildstructure.py
# @Software: PyCharm Community Edition
# @Blog    :http://blog.csdn.net/u010105243/article/
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Define a fully connected layer
def layer(inputs, in_size, out_size, activation_function=None):
    # Layer weights: random initial values work better than constants;
    # a uniform distribution is used here, but other initializers work too.
    # [in_size, out_size] is the shape of this layer's weight matrix.
    W = tf.Variable(tf.random_uniform([in_size, out_size]))
    b = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    y_ = tf.matmul(inputs, W) + b  # xW + b
    if activation_function is None:
        outputs = y_
    else:
        outputs = activation_function(y_)
    return outputs

# Generate the data: 300 samples of y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for the inputs and targets
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
l1 = layer(xs, 1, 10, activation_function=tf.nn.relu)  # hidden layer: 10 ReLU units
y_pre = layer(l1, 10, 1, activation_function=None)     # linear output layer

# Mean-squared-error loss
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - y_pre), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Initialize the variables and start the session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Set up the figure
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)  # scatter plot of the noisy training data
plt.ion()  # interactive mode, so drawing does not block the training loop
plt.show()

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 20 == 0:
        prediction = sess.run(y_pre, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction, 'r-', lw=5)  # red curve; lw = line width
        plt.pause(0.1)
        ax.lines.remove(lines[0])  # drop the old curve before the next redraw
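One small tweak worth adding (my addition, not in the original post): with plt.ion() the window closes as soon as the script exits, so drawing the final fit and switching interactive mode off keeps the last figure on screen.

# Draw the final fit and keep the window open after training ends
prediction = sess.run(y_pre, feed_dict={xs: x_data})
ax.plot(x_data, prediction, 'r-', lw=5)
plt.ioff()
plt.show()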
