TensorFlow 1.13 Learning Notes

Data Flow Graphs, Tensors, and Data Types

Data flow graphs

import tensorflow as tf

# Define the data flow graph
a = tf.constant(2, name="input_a")
b = tf.constant(5, name="input_b")
c = tf.add(a,b,name="c")
d = tf.constant(8, name="input_d")
e = tf.multiply(c,d,name="e")

# Run the data flow graph through a Session (executed by the C++ backend)
sess = tf.Session()
out = sess.run(e)
print(out)

# Write the graph so it can be viewed in TensorBoard
writer = tf.summary.FileWriter("./log",sess.graph)
# sess.graph is a default attribute; every Session has a graph attribute.

View the data flow graph in the browser:

(tensorflow1.13-gpu) D:\Code\PycharmProjects\Rcommendation_realize\tensorflow1.13_learn\begin>tensorboard --logdir=./log

Then open localhost:6006 in the browser.

Tensors

A tensor can be thought of as an n-dimensional matrix.
Input data is fed batch by batch, e.g. [number of images, height, width, RGB channels].

Data types supported by TensorFlow:
tf.float32, tf.int32, tf.bool, tf.string
NumPy is often used as the data source for TensorFlow.
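
For example, a NumPy array can be handed straight to TensorFlow (a minimal sketch; the array values are made up):

import numpy as np
import tensorflow as tf

arr = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
t = tf.constant(arr, name="from_numpy")   # dtype and shape are taken from the array
print(t.dtype)   # <dtype: 'float32'>
print(t.shape)   # (2, 2)

with tf.Session() as sess:
    print(sess.run(t))   # evaluates back to a NumPy array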

Session

feed_dict can override the value of a tensor in the data flow graph. Typical use: feed one set of data during training, then substitute different data at test time via feed_dict.

a = tf.constant(2)
b = tf.constant(3)
c = tf.add(a, b)
sess = tf.Session()
out = sess.run(c, feed_dict={a: 20})  # feed_dict overrides the value of tensor a in the graph
print(out)
sess.close()

Placeholders: defined up front and filled in at run time with feed_dict.

a = tf.placeholder(tf.float32,shape=[3],name="shuru")
b = tf.reduce_prod(a)   # product of the elements of a
c = tf.reduce_sum(a)    # sum of the elements
d = tf.add(b,c)
with tf.Session() as sess:
    out = sess.run(d, feed_dict={a: [2, 3, 4]})
print(out)

Variables: values that are updated continually during training, typically used for weights and biases.

# Variables
weight = tf.Variable(2, name="quanzhong")
weight = tf.Variable(tf.random_normal([128,10]), name="quanzhong")
weight = tf.Variable(tf.truncated_normal([128,10]), name="quanzhong")   # truncated normal keeps values closer to the mean, no large outliers
bias = tf.Variable(tf.zeros([10]))
# Variables must be initialized before the graph is run
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
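
A variable's value can also be updated explicitly with tf.assign; a small sketch (the counter variable is made up for illustration):

counter = tf.Variable(0, name="counter")
update = tf.assign(counter, counter + 1)   # an op that writes the new value into the variable

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for _ in range(3):
    print(sess.run(update))   # prints 1, 2, 3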

Linear Regression

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
data_x = np.linspace(0,10,30)   # 30 evenly spaced numbers from 0 to 10
data_y = data_x*3 +7 + np.random.normal(0,1,30)
print(data_x)
print(data_y)

# plt.scatter(data_x, data_y)
# plt.show()

# 1. Define the parameters
# 2. Feed in the training data
# 3. Run inference
# 4. Compute the loss
# 5. Train the model
# 6. Evaluate
w = tf.Variable(1., name="quanzhong")   # 1 would be inferred as int32; 1. is inferred as float32
b = tf.Variable(0., name="pianzhi")
x = tf.placeholder(tf.float32, shape=None)  # None: any shape. [None]: 1-D, any length. [None,3]: any number of rows, 3 columns
y = tf.placeholder(tf.float32, shape=[None])
pred = tf.multiply(x,w) + b     # predicted value
loss = tf.reduce_sum(tf.squared_difference(pred, y))   # squared_difference computes the squared difference between pred and the true value y; reduce_sum sums it down to a scalar
# 梯度下降
learn_rate = 0.0001
train_step = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)   # use this learning rate and minimize the loss
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(10000):
    sess.run(train_step, feed_dict={x: data_x, y: data_y})
    if i % 1000 == 0:
        print(sess.run([loss,w,b], feed_dict={x: data_x, y: data_y}))  # feed_dict must be supplied on every sess.run call
# NaN values in the printout suggest the learning rate is too high
# Prediction
print(sess.run(12*w + b))
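
After training, the learned weight and bias can be read back from the session and compared against the slope 3 and intercept 7 used to generate the data; a small follow-up sketch reusing the variables above:

w_val, b_val = sess.run([w, b])
print(w_val, b_val)                              # should land close to 3 and 7
plt.scatter(data_x, data_y)                      # original points
plt.plot(data_x, data_x * w_val + b_val, 'r')    # fitted line
plt.show()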

Logistic Regression

Answers "yes"/"no" questions.
Uses the sigmoid function.
For classification problems, the loss function uses cross-entropy.
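
For intuition, the binary cross-entropy can be written out by hand and compared with tf.nn.sigmoid_cross_entropy_with_logits, which the code below relies on; a minimal sketch with made-up logits and labels:

import tensorflow as tf

logits = tf.constant([[2.0], [-1.0]])
labels = tf.constant([[1.0], [0.0]])

p = tf.sigmoid(logits)                                          # probability of the positive class
manual = -(labels * tf.log(p) + (1 - labels) * tf.log(1 - p))   # cross-entropy by definition
builtin = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run([manual, builtin]))   # the two results agree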

import tensorflow as tf
import pandas as pd
import numpy as np
# tf.nn.softmax_cross_entropy_with_logits

data = pd.read_csv("../../data/tt/train.csv")

data = data[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]   # selecting with a list of columns keeps the result as a DataFrame
data = data.fillna(0)   # fill missing values with 0
x = pd.factorize(data.Sex)
data['Sex'] = pd.factorize(data.Sex)[0]   # map the strings to integer codes; [0] takes the codes array

# One-hot encode Pclass (ticket class)
data['p1'] = np.array(data['Pclass']==1).astype(np.float32)
data['p2'] = np.array(data['Pclass']==2).astype(np.float32)
data['p3'] = np.array(data['Pclass']==3).astype(np.float32)
del data['Pclass']      # delete the original Pclass column

# data.Embarked.unique()  # inspect the embarkation ports

# One-hot encode Embarked (port of embarkation)
data['e1'] = np.array(data['Embarked']=='S').astype(np.float32)
data['e2'] = np.array(data['Embarked']=='C').astype(np.float32)
data['e3'] = np.array(data['Embarked']=='Q').astype(np.float32)
del data['Embarked']

# Convert to np.array form
data_data = np.stack([data.Sex.values.astype(np.float32),data.Age.values.astype(np.float32),data.SibSp.values.astype(np.float32),
                      data.Parch.values.astype(np.float32),data.Fare.values.astype(np.float32),
                      data.p1.values,data.p2.values,data.p3.values,
                      data.e1.values,data.e2.values,data.e3.values]).T   # .T transposes; np.stack joins a sequence of arrays along a new axis

data_target = np.reshape(data.Survived.values.astype(np.float32),(891,1))
# print(np.shape(data.Survived.values.astype(np.float32)))    # this is 1-D
# print(np.shape(data_target))  # after reshaping it is 2-D


# Define the network
x = tf.placeholder("float", shape=[None,11])
y = tf.placeholder("float", shape=[None,1])

weight = tf.Variable(tf.random_normal([11, 1]))
bias = tf.Variable(tf.random_normal([1]))
output = tf.matmul(x,weight) + bias       # matmul here means matrix multiplication
pred = tf.cast(tf.sigmoid(output)>0.5,tf.float32)     # sigmoid turns the logits into probabilities, >0.5 gives booleans, tf.cast converts them to floats

loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y,logits=output))    # cross-entropy loss: takes the labels y and the raw logits output (sigmoid is applied internally); reduce_mean averages over the batch

train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

# Accuracy: pred and y are already 0/1, so just check equality. True=1.0, False=0.0
accuracy = tf.reduce_mean(tf.cast(tf.equal(pred,y), tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(10000):
      index = np.random.permutation(len(data_target))  # shuffle the row order once per pass; training on shuffled data generalizes better
      data_data = data_data[index]
      data_target = data_target[index]
      for n in range(len(data_target)//100):    # feed 100 rows per mini-batch
            batch_xs = data_data[n*100:n*100+100]
            batch_ys = data_target[n*100:n*100+100]
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
      if i%1000 == 0:
            print(sess.run((loss,accuracy), feed_dict={x: batch_xs, y: batch_ys}))


# Read and preprocess the test set.
data_test = pd.read_csv("../../data/tt/test.csv")
data_test = data_test.fillna(0)
data_test['Sex'] = pd.factorize(data_test.Sex)[0]
data_test['p1'] = np.array(data_test['Pclass']==1).astype(np.float32)
data_test['p2'] = np.array(data_test['Pclass']==2).astype(np.float32)
data_test['p3'] = np.array(data_test['Pclass']==3).astype(np.float32)
data_test['e1'] = np.array(data_test['Embarked']=='S').astype(np.float32)
data_test['e2'] = np.array(data_test['Embarked']=='C').astype(np.float32)
data_test['e3'] = np.array(data_test['Embarked']=='Q').astype(np.float32)
test_data = np.stack([data_test.Sex.values.astype(np.float32),data_test.Age.values.astype(np.float32),data_test.SibSp.values.astype(np.float32),
                      data_test.Parch.values.astype(np.float32),data_test.Fare.values.astype(np.float32),
                      data_test.p1.values,data_test.p2.values,data_test.p3.values,
                      data_test.e1.values,data_test.e2.values,data_test.e3.values]).T

# Evaluate on the test set
test_label = pd.read_csv("../../data/tt/gender.csv")
test_label = np.reshape(test_label.Survived.values.astype(np.float32),(418,1))
print(sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
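
As a side note, the manual one-hot columns above (p1/p2/p3 and e1/e2/e3) could also be produced with pandas; a compact sketch, assuming it is applied to the raw DataFrame before the Pclass and Embarked columns are deleted:

raw = pd.read_csv("../../data/tt/train.csv")
dummies = pd.get_dummies(raw[['Pclass', 'Embarked']].astype(str), prefix=['p', 'e'])
print(dummies.head())   # columns such as p_1, p_2, p_3, e_C, e_Q, e_S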

Multi-class Classification

Use tf.nn.softmax().
For optimization use tf.train.AdamOptimizer(0.0005).minimize(loss). AdamOptimizer is recommended; it generally optimizes better.
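
A minimal sketch of what tf.nn.softmax does, with made-up logits: it turns raw scores into probabilities that sum to 1, and the largest score gets the largest probability.

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
probs = tf.nn.softmax(logits)

with tf.Session() as sess:
    print(sess.run(probs))                  # roughly [[0.66, 0.24, 0.10]]
    print(sess.run(tf.reduce_sum(probs)))   # 1.0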

# -*- coding: utf-8 -*-
import numpy as np
import requests
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf

'''
r = requests.get('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
with open("../../data/iris.data", "w") as f:
    f.write(r.text)
'''

# Load the data
data = pd.read_csv("../../data/iris.data", names=['e_cd','e_kd','b_cd','b_kd','cat']) # sepal length/width, petal length/width, class
# print(data)

# sns.pairplot(data)
# plt.show()


# print(data.cat.unique())
data['c1'] = np.array(data['cat'] == 'Iris-setosa').astype(np.float32)  # True=1.0, False=0.0
data['c2'] = np.array(data['cat'] == 'Iris-versicolor').astype(np.float32)
data['c3'] = np.array(data['cat'] == 'Iris-virginica').astype(np.float32)
target = np.stack([data.c1.values, data.c2.values, data.c3.values]).T
shuju = np.stack([data.e_cd.values, data.e_kd.values, data.b_cd.values, data.b_kd.values]).T
print(np.shape(shuju), np.shape(target))

# Define the network
x = tf.placeholder("float", shape=[None,4])     # the number of samples per batch is left unspecified
y = tf.placeholder("float", shape=[None,3])
weight = tf.Variable(tf.truncated_normal([4,3]))
bias = tf.Variable(tf.truncated_normal([3]))
combine_input = tf.matmul(x,weight) + bias  # matmul for matrix multiplication
pred = tf.nn.softmax(combine_input)
# print(y.get_shape(), combine_input.get_shape())    # (?, 3) (?, 3)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=combine_input))  # cross-entropy again, since this is also a classification problem

# Accuracy
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y, 1)) # argmax returns the index of the largest value; for [0.5,0.3,0.2] that index is 0
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)  # AdamOptimizer is recommended; it generally optimizes better

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(20000):
    index = np.random.permutation(len(target))  # shuffle the order on every iteration for better generalization
    shuju = shuju[index]    # reorder the data
    target = target[index]
    sess.run(train_step, feed_dict={x: shuju, y: target})
    if i%1000 == 0:
        print(sess.run((loss,accuracy), feed_dict={x: shuju, y: target}))

CNN

Convolution extracts image features (pattern matching): when an image containing certain features passes through the convolution kernels, some kernels are activated and output a specific signal.
Architecture:
Convolution layer
Non-linear activation layer
Pooling layer
Fully connected layer
Downsampling is rarely done via the convolution stride.
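
To make the point about downsampling concrete, here is a small shape check (a sketch with made-up sizes) comparing a 2x2 max pool with stride 2 against a strided convolution; the implementation below uses pooling:

import tensorflow as tf

img = tf.placeholder(tf.float32, [None, 28, 28, 1])

# downsampling with max pooling (the choice used in these notes)
pooled = tf.nn.max_pool(img, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

# downsampling with a strided convolution (rarely used here)
kernel = tf.Variable(tf.truncated_normal([5, 5, 1, 32]))
strided = tf.nn.conv2d(img, kernel, strides=[1, 2, 2, 1], padding="SAME")

print(pooled.get_shape())    # (?, 14, 14, 1)
print(strided.get_shape())   # (?, 14, 14, 32)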

Implementation:
Softmax version:

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
'''
print((len(mnist.train.images), len(mnist.train.labels)))
print(len(mnist.test.images),len(mnist.test.labels))
# print(mnist.train.images[0])
plt.imshow(mnist.train.images[1].reshape(28,28))
plt.show()
'''

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
loss = -tf.reduce_sum(y_*tf.log(y))  # cross-entropy computed directly from its definition
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(10000):
    batch = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
    if i%50 == 0:
        print(sess.run(loss, feed_dict={x: batch[0], y_: batch[1]}))

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print(acc)

CNN version:

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])

x_image = tf.reshape(x, [-1,28,28,1])   # -1: this dimension is inferred automatically from the batch size

conv2d_1 = tf.contrib.layers.convolution2d(
    x_image,
    num_outputs=32, # number of output channels
    kernel_size = (5,5),   # convolution kernel size
    activation_fn = tf.nn.relu,
    stride = (1,1), # stride in height and width
    padding="SAME",
    trainable=True)  # whether the parameters are trainable

pool_1 = tf.nn.max_pool(conv2d_1,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")  # halve the width and height

# print(conv2d_1.get_shape())

conv2d_2 = tf.contrib.layers.convolution2d(
    pool_1,
    num_outputs=64, # number of output channels
    kernel_size = (5,5),   # convolution kernel size
    activation_fn = tf.nn.relu,
    stride = (1,1), # stride in height and width
    padding="SAME",
    trainable=True)  # whether the parameters are trainable

pool_2 = tf.nn.max_pool(conv2d_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")  # halve the width and height

# Fully connected layers: flatten the feature maps first
pool2_flat = tf.reshape(pool_2, [-1, 7*7*64])
fc_1 = tf.contrib.layers.fully_connected(pool2_flat, 1024, activation_fn=tf.nn.relu)
keep_prob = tf.placeholder("float")
fc1_drop = tf.nn.dropout(fc_1, keep_prob)   # randomly keep only part of the activations, which helps prevent overfitting

fc_2 = tf.contrib.layers.fully_connected(fc1_drop, 10, activation_fn=tf.nn.softmax)

loss = -tf.reduce_sum(y_*tf.log(fc_2))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    if i%100 == 0:
        print(sess.run(loss, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}))

correct_prediction = tf.equal(tf.argmax(fc_2, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1})
print(acc)


Reposted from blog.csdn.net/Saker__/article/details/108740785