TensorFlow Deep Learning Applications (Advanced): Regression (Function-Fitting Training) with Visualization

#coding=gbk
'''
Advanced topics:
    Multiple regression:
    Modelling problem:
    Y = x1*w1 + x2*w2 + x3*w3 + ... + xn*wn + b, written compactly in matrix form as Y = XW + b

    numpy basics:
    A plain Python integer has no shape (dimensions); wrapping it with np.array()
    turns it into a scalar (0-d array) whose shape can be queried.

    A general issue:
    The ranges of the important and the less important features need to be constrained,
    i.e. the data should be normalized, which helps the model converge:
    feature value / (max feature value - min feature value) -> roughly [0, 1]

    Feature normalization:
    Each feature column is divided by (max - min); the labels are left unchanged.


Extensions
'''


from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese labels can be drawn in plots
mpl.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correct when a non-default font is set

# import the TensorFlow module (v1-style graph API)
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # placeholders and sessions require graph mode when running under TensorFlow 2.x
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston  # loads the Boston housing dataset
from sklearn.utils import shuffle  # used to shuffle the data

# numpy basics
# convert a plain integer into a numpy array (a 0-d scalar)
s_v = 20
scalar_np = np.array(s_v)
print("Scalar:\n", scalar_np, scalar_np.shape)

# vector
vector_v = [1, 2, 3, 4, 5, 6, 7, 8, 9]
vector_np = np.array(vector_v)
print("Vector:\n", vector_np, vector_np.shape)

# matrix
m_v = [[1, 2, 3], [7, 8, 9], [4, 5, 6]]
m_np = np.array(m_v)
print("Matrix:\n", m_np, m_np.shape)

# representing row vectors and column vectors
row = np.array([[1, 2, 3]])
print("Row vector:\n", row, row.shape)

column = np.array([[1], [2], [3]])
print("Column vector:\n", column, column.shape)


# matrix arithmetic
a = np.array([[1, 2, 3], [4, 5, 6]])
print(a)
a = a + 7
print(a)
a = a * 2
print(a)
a = a + a
print(a)
a = a - 3
print(a)
a = a / 2.0
print(a)
# +, -, *, / all operate element-wise

# transpose: a_ij -> a_ji via the .T attribute; reshape() changes the dimensions (added example below)
m = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(m.T)
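# reshape() keeps the same data but changes the dimensions; a small added example:
print(m.reshape(1, 9))   # the 3x3 matrix above laid out as a single 1x9 row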

# element-wise (Hadamard) product of two matrices, each element multiplied by its counterpart: np.multiply()
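# a short added example of np.multiply (the names p_a and p_b are made up for the illustration):
p_a = np.array([[1, 2, 3]])
p_b = np.array([[4, 5, 6]])
print(np.multiply(p_a, p_b))   # [[ 4 10 18]]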

# matrix multiplication: shapes MxN and NxK multiply to give MxK; use np.matmul(a, b)
m_a = np.array([[1, 2, 3]])
m_b = np.array([[3], [2], [1]])
m_c = np.matmul(m_a, m_b)
print(m_c)
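
# Added sketch tying the matrix product back to the modelling formula Y = XW + b:
# X_demo is a small 2x3 "batch", W_demo is 3x1, so their product is 2x1 and the
# scalar b_demo is broadcast over the batch (all *_demo names are made up here).
X_demo = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
W_demo = np.array([[0.1], [0.2], [0.3]])
b_demo = 1.0
print(np.matmul(X_demo, W_demo) + b_demo)   # [[2.4] [4.2]]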

# load the dataset; return_X_y=True skips the dataset description and returns only (features, target)
# (load_boston is only available in scikit-learn versions before 1.2)
boston_price_data, boston_price_target = load_boston(return_X_y=True)
print(boston_price_data, boston_price_data.shape)

# normalize each feature column to roughly [0, 1]
for i in range(12):
    boston_price_data[:, i] = boston_price_data[:, i] / (boston_price_data[:, i].max() - boston_price_data[:, i].min())
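# added note: strict min-max scaling also subtracts the column minimum,
# i.e. (x - min) / (max - min); the divide-by-range form above maps each column
# into an interval of width 1 rather than exactly [0, 1]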
# features: the first 12 columns
x_d = boston_price_data[:, :12]
print(x_d, x_d.shape)
print("\n")
# labels: the house prices (the dataset target), left un-normalized
y_d = boston_price_target
print(y_d, y_d.shape)

# model definition
x = tf.placeholder(tf.float32, [None, 12], name='x')
y = tf.placeholder(tf.float32, [None, 1], name='y')
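# None in the first dimension leaves the batch size flexible, so either a single
# 1x12 sample or the whole 506x12 feature matrix can be fed into the same placeholder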

# fit Y = x1*w1 + x2*w2 + x3*w3 + ... + xn*wn + b, i.e. Y = XW + b in matrix form
with tf.name_scope("model"):  # group the sub-graph below so the computation graph stays tidy and is easy to inspect in TensorBoard
    w = tf.Variable(tf.random_normal([12, 1], stddev=0.01), name='w')  # randomly initialize the weight vector
    b = tf.Variable(1.0, name='b')  # initialize the bias
    def model(x, w, b):
        return tf.matmul(x, w) + b  # matrix multiplication plus bias
    predict = model(x, w, b)        # prediction op
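    # shape check (added note): x is [None, 12] and w is [12, 1], so tf.matmul(x, w)
    # is [None, 1]; the scalar b is broadcast over the batch dimension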
    
# model training
train_c = 80           # number of training epochs
learning_rate = 0.01   # learning rate for gradient descent
with tf.name_scope("LossFun"):  # group the loss sub-graph
    loss_Fun = tf.reduce_mean(tf.pow(y - predict, 2))  # mean squared error
# optimizer: plain gradient descent minimizing the loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_Fun)
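
# Added illustration (plain numpy, not part of the TF graph): one manual gradient-descent
# step for Y = XW + b under the same mean-squared-error loss, to show the update rule
# GradientDescentOptimizer applies. All *_demo2 names are made up for this sketch.
X_demo2 = np.array([[1.0, 2.0], [3.0, 4.0]])
y_demo2 = np.array([[1.0], [2.0]])
w_demo2 = np.zeros((2, 1))
b_demo2 = 0.0
err = np.matmul(X_demo2, w_demo2) + b_demo2 - y_demo2    # prediction error
grad_w = 2.0 * np.matmul(X_demo2.T, err) / len(y_demo2)  # d(mean squared error)/dw
grad_b = 2.0 * err.mean()                                # d(mean squared error)/db
w_demo2 -= learning_rate * grad_w   # w := w - lr * grad, the same rule the optimizer uses
b_demo2 -= learning_rate * grad_b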

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

loss_l = []
for i in range(train_c):
    loss_s = 0.0
    for x_, y_ in zip(x_d, y_d):
        x_ = x_.reshape(1, 12)  # reshape each sample so x_ and y_ match the placeholder shapes
        y_ = y_.reshape(1, 1)
        _, loss = sess.run([optimizer, loss_Fun], feed_dict={x: x_, y: y_})
        '''statements used for TensorBoard visualization:
        _, summary_s, loss = sess.run([optimizer, summary_loss_op, loss_Fun], feed_dict={x: x_, y: y_})
        write.add_summary(summary_s, i)
        '''
        loss_s = loss_s + loss
    x_d, y_d = shuffle(x_d, y_d)  # shuffle the data between epochs (reassigning so the shuffle takes effect)
    b0 = b.eval(session=sess)
    w0 = w.eval(session=sess)
    loss_average = loss_s / len(y_d)

    loss_l.append(loss_average)
    print("train count=", i + 1, "loss=", loss_average, "b=", b0, "w=", w0)
    
# model check (sample 430 is taken from the training data, so this is a sanity check, not a held-out test)
x_test = x_d[430]
x_test = x_test.reshape(1, 12)
p = sess.run(predict, feed_dict={x: x_test})
print("Predicted value: %f" % p[0, 0], "Label value: %f\n" % y_d[430])

plt.plot(loss_l)
plt.title("Loss curve")
plt.show()

logdir = "E:/VSCODE/"
if tf.gfile.Exists(logdir):
    tf.gfile.DeleteRecursively(logdir)

'''TensorBoard data visualization:
summary_loss_op = tf.summary.scalar("loss", loss_Fun)  # record the loss value; it shows up under the SCALARS tab in TensorBoard
merged = tf.summary.merge_all()  # merge all requested summaries into a single write op
'''
write = tf.summary.FileWriter(logdir, tf.get_default_graph())
write.close()
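# To inspect the graph (and any scalar summaries, if the commented-out lines above are
# enabled), launch TensorBoard from the command line, e.g.:
#   tensorboard --logdir=E:/VSCODE/
# then open the address it prints in a browser.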

Note:
This article is based on the China University MOOC course "深度学习应用开发-Tensorflow实战" (Deep Learning Application Development: TensorFlow in Practice). Originally published at blog.csdn.net/TxyITxs/article/details/103855481.
