TensorFlow: Logistic Regression Model

# -*- coding: utf-8 -*-
"""
Created on Tue Mar  6 10:17:49 2018

@author: 李慧泽
"""
# Logistic regression is the most common baseline model for classification problems: it is simple and easy to interpret.
# Step 1: environment setup
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
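# (Setting TF_CPP_MIN_LOG_LEVEL to '2' silences TensorFlow's INFO and WARNING
# messages from the C++ backend, keeping the console output clean.)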
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time
# Step 2: load the data
# Load the MNIST handwritten-digit dataset with TensorFlow's built-in helper
mnist=input_data.read_data_sets('/data/mnist',one_hot=True)
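# Note: read_data_sets downloads the MNIST files into '/data/mnist' on first
# run and reuses the cached copies afterwards. This tutorial module ships with
# TensorFlow 1.x only.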
# Inspect the feature dimensions
print(mnist.train.images.shape)
# Inspect the target dimensions
print(mnist.train.labels.shape)

# Step 3: set up the placeholders
batch_size=128 # size of one mini-batch
X=tf.placeholder(tf.float32,[batch_size,784],name='X_placeholder')
Y=tf.placeholder(tf.float32,[batch_size,10],name='Y_placeholder')
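# Side note (a sketch, not used below): setting the first dimension to None
# lets the same placeholders accept any batch size, avoiding the fixed
# batch_size constraint at evaluation time:
#   X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder')
#   Y = tf.placeholder(tf.float32, [None, 10], name='Y_placeholder')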
# Step 4: set up the parameters (weights and bias)
w=tf.Variable(tf.random_normal(shape=[784,10],stddev=0.01),name='weights')
b=tf.Variable(tf.zeros([1,10]),name='bias')
# Step 5: compute a score for each class (logits) via matrix multiplication
logits=tf.matmul(X,w)+b
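# Shape check: X is [batch_size, 784] and w is [784, 10], so the product is
# [batch_size, 10]; the [1, 10] bias b is broadcast across the batch.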
# Step 6: build the softmax loss function for multi-class classification
# Cross-entropy loss, one value per example
entropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=Y,name='loss')
# Average the per-example losses over the batch
loss=tf.reduce_mean(entropy,name='avg_loss')
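# For one example with one-hot label y, the op above computes
# -sum_k y[k]*log(softmax(logits)[k]); reduce_mean then averages this over the
# batch. A NumPy cross-check of a single row would look like (illustrative):
#   p = np.exp(row - row.max()); p /= p.sum()
#   loss_row = -np.sum(y_row * np.log(p))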
# Step 7: set up the optimizer. Optimization is done with stochastic
# mini-batch gradient descent; here we pick the AdamOptimizer.
learning_rate=0.01
optimizer=tf.train.AdamOptimizer(learning_rate).minimize(loss)
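# To compare against plain stochastic gradient descent, swap in:
#   optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)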
# Step 8: run the operations defined in the graph inside a session
# Total number of training epochs
n_epochs=30
with tf.Session() as sess:
    # Write the graph so its structure can be viewed in TensorBoard
    writer=tf.summary.FileWriter('./graphs/logistic_reg',sess.graph)
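    # To view the saved graph afterwards (assuming TensorBoard is installed), run:
    #   tensorboard --logdir=./graphs/logistic_reg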
    start_time=time.time()
    sess.run(tf.global_variables_initializer())
    n_batches=int(mnist.train.num_examples/batch_size)
    for i in range(n_epochs):
        total_loss=0
        for j in range(n_batches):
            X_batch,Y_batch=mnist.train.next_batch(batch_size)
            _,loss_batch=sess.run([optimizer,loss],feed_dict={X:X_batch,Y:Y_batch})
            total_loss+=loss_batch
        print('Average loss epoch {0}: {1}'.format(i,total_loss/n_batches))
    print('Total time: {0} seconds'.format(time.time()-start_time))
    print('Optimization Finished!')
    # Evaluate the trained model on the test set
    preds=tf.nn.softmax(logits)
    correct_preds=tf.equal(tf.argmax(preds,1),tf.argmax(Y,1))
    accuracy=tf.reduce_sum(tf.cast(correct_preds,tf.float32))
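    # Note: reduce_sum counts the correct predictions in a batch rather than
    # averaging them; dividing the running total by the test-set size below
    # yields the overall accuracy.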
    n_batches=int(mnist.test.num_examples/batch_size)
    total_correct_preds=0
    for i in range(n_batches):
        X_batch,Y_batch=mnist.test.next_batch(batch_size)
        accuracy_batch=sess.run([accuracy],feed_dict={X:X_batch,Y:Y_batch})
        total_correct_preds += accuracy_batch[0]
    print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))
    writer.close()

Reposted from blog.csdn.net/qq_41424519/article/details/81741149