Implementing a Logistic Regression Model in TensorFlow (MNIST Dataset)

This post has three parts:

1. Introduction to and loading of the MNIST dataset

2. The softmax function and basic TensorFlow syntax

3. Code implementation

I. Introduction to the MNIST Dataset

Overview: a database of handwritten digits containing 60,000 training examples and 10,000 test examples. (The TensorFlow loader further splits the training set into 55,000 training and 5,000 validation examples, which is why the shapes printed below show 55000.)

Official site: http://yann.lecun.com/exdb/mnist/

What the data looks like: each example has two parts.

Image part: the handwritten digit picture

Label part: which digit the picture represents

from tensorflow.examples.tutorials.mnist import input_data
# one_hot: one-hot encoding, also called one-of-N encoding; exactly one bit is 1 at any time and all other bits are 0
mnist = input_data.read_data_sets("data/",one_hot = True)
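# e.g. with one_hot=True the label for the digit 7 is [0,0,0,0,0,0,0,1,0,0]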

train_images = mnist.train.images
train_labels = mnist.train.labels
test_images = mnist.test.images
test_labels = mnist.test.labels

print("train_images_shape:",train_images.shape)
print("train_labels_shape:",train_labels.shape)
print("test_images_shape:",test_images.shape)
print("test_labels_shape:",test_labels.shape)
print("train_images:",train_images[0])
print("train_images_length:",len(train_images[0]))
print("train_labels:",train_labels[0])

II. TensorFlow Syntax Used for Logistic Regression

The softmax function

1. Logistic regression (handles binary classification)

2. softmax (handles multi-class classification)

softmax(x)_i = exp(x_i) / Σ_j exp(x_j)
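A minimal NumPy sketch of this formula (the max subtraction is a standard numerical-stability trick and does not change the result):

import numpy as np

def softmax(x):
    # subtract the max so np.exp cannot overflow; the output is unchanged
    e = np.exp(x - np.max(x))
    return e / e.sum()

print(softmax(np.array([1.0, 2.0, 3.0])))  # [0.09003057 0.24472847 0.66524096]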

import tensorflow as tf
import numpy as np
# placeholder: used when the concrete values are not known until run time
x = tf.placeholder(tf.float32,shape=(4,4))
y = tf.add(x,x)
#[1,32,44,56]
#[89,12,90,33]
#[35,69,1,10]
argmax_parameter = tf.Variable([[1,32,44,56],[89,12,90,33],[35,69,1,10]])

#index of the maximum value in each column (along axis 0)
argmax_0 = tf.argmax(argmax_parameter,0)
#index of the maximum value in each row (along axis 1)
argmax_1 = tf.argmax(argmax_parameter,1)
#mean along each axis (int32 input, so the means truncate to integers)
reduce_0 = tf.reduce_mean(argmax_parameter,axis=0)
reduce_1 = tf.reduce_mean(argmax_parameter,axis=1)
#element-wise equality test
equal_0 = tf.equal(1,2)
equal_1 = tf.equal(2,2)
#type conversion
cast_0 = tf.cast(equal_0,tf.int32)
cast_1 = tf.cast(equal_1,tf.float32)

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    rand_array = np.random.rand(4,4)
    print(sess.run(y,feed_dict={x:rand_array}))

    print("argmax_0:",sess.run(argmax_0))
    print("argmax_1:",sess.run(argmax_1))
    print("reduce_0:",sess.run(reduce_0))
    print("reduce_1:",sess.run(reduce_1))
    print("equal_0:",sess.run(equal_0))
    print("equal_1:",sess.run(equal_1))
    print("cast_0:",sess.run(cast_0))
    print("cast_1:",sess.run(cast_1))
    

III. Code Implementation of Logistic Regression

import tensorflow as tf
#import the dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/",one_hot=True)
#variables
batch_size = 100
#training inputs: x (image) and y (label); placeholders, since each batch is fed in at run time
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
#model weights
#[55000,784] * w = [55000,10]
w = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
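# note: zero initialization is acceptable here because softmax regression has a convex loss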
#build the logistic regression model with softmax
pred = tf.nn.softmax(tf.matmul(x,w) + b)
#loss function (cross entropy)
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred),1))
#gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
#initialize all variables
init = tf.global_variables_initializer()

#launch the graph in a session
with tf.Session() as sess:
    sess.run(init)
    #start training
    for epoch in range(25):
        avg_cost=0
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(optimizer,{x:batch_xs,y:batch_ys})
            #accumulate the average loss over all batches
            avg_cost += sess.run(cost,{x:batch_xs,y:batch_ys})/total_batch
        if (epoch+1) % 5 == 0:
            print("Epoch:",'%04d' % (epoch+1),"cost=","{:.9f}".format(avg_cost))
    print("运行完成")
    #evaluate accuracy on the test set
    correct = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct,tf.float32))
    print("正确率:",accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))


Reprinted from blog.csdn.net/qq_40108803/article/details/83025541