Using dropout to solve overfitting

The idea: during each training pass, randomly ignore some neurons and their connections, so the network is temporarily incomplete. Repeat this pass after pass, and the final result no longer depends on any single parameter, which counters overfitting.
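For intuition, here is a minimal sketch (my own illustration, using the TensorFlow 1.x API like the code below) of what tf.nn.dropout does: each entry is kept with probability keep_prob, and the kept entries are scaled by 1/keep_prob so the expected sum stays unchanged.

import tensorflow as tf

x = tf.ones([1, 10])
dropped = tf.nn.dropout(x, keep_prob=0.5)  # each entry kept with probability 0.5
with tf.Session() as sess:
    print(sess.run(dropped))  # roughly half the entries are 0.0, the rest are scaled to 2.0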

import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

#load data
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)
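# note: each row of X has 64 features (an 8x8 pixel image); y is now one-hot encoded with 10 columns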

# arguments: the layer inputs, input size, output size, a layer name, and an activation function (default None, i.e. linear)
def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # random initial weights work much better than zeros; shape [in_size, out_size]
    # biases are conventionally initialized to a small non-zero value; shape [1, out_size]
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases  # matmul is matrix multiplication; this holds the not-yet-activated values
    # dropout: randomly discard a fraction (1 - keep_prob) of these pre-activation values
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs


# define placeholders for the inputs; each digit image is 8x8 = 64 pixels
keep_prob = tf.placeholder(tf.float32)  # fraction of units to keep, i.e. NOT dropped
xs = tf.placeholder(tf.float32, [None, 64])  # 64 input features per sample (8x8 image)
ys = tf.placeholder(tf.float32, [None, 10])  # 10 output units, one per digit 0-9

# add a hidden layer and the output layer; softmax is the usual activation for classification
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)

# the error between prediction and real data: cross-entropy, the standard loss paired with softmax for classification
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))  # loss
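# note (not in the original post): tf.log(prediction) returns NaN if any predicted probability
# hits exactly 0; a common safeguard is tf.log(tf.clip_by_value(prediction, 1e-10, 1.0))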
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)


sess = tf.Session()
merged = tf.summary.merge_all()
#summary writer goes in here
train_writer = tf.summary.FileWriter("A://logs/train", sess.graph)
test_writer = tf.summary.FileWriter("A://logs/test", sess.graph)
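# assuming TensorBoard is installed, the train and test loss curves can be compared with:
#   tensorboard --logdir A://logs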

# important step: pick the variable initializer that matches the TensorFlow version
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

for i in range(500):
    sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})  # drop roughly 50% of activations during training
    if i % 50 == 0:
        # record the loss; don't drop anything while recording summaries (keep_prob: 1)
        train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
        test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
        # write both summaries for training step i
        train_writer.add_summary(train_result, i)
        test_writer.add_summary(test_result, i)
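To check the effect numerically, an accuracy measurement can be appended after the loop (a sketch that is not in the original post; it reuses prediction, ys and sess from above, with dropout turned off via keep_prob: 1):

correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))  # per-sample: predicted class == true class
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # fraction of correct predictions
print(sess.run(accuracy, feed_dict={xs: X_test, ys: y_test, keep_prob: 1}))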


Reposted from blog.csdn.net/daxuan1881/article/details/84880878