TensorFlow (4): Logistic Regression

Notes from working through the tutorial 《Tensorflow入门教程》 (Introduction to TensorFlow).

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('data/', one_hot=True)  # load the MNIST dataset
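# with one_hot=True each label is a 10-dim indicator vector, e.g. digit 3 -> [0,0,0,1,0,0,0,0,0,0]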

# hyperparameters
numClasses = 10  # number of output classes
inputSize = 784  # input size: 28*28*1 pixels per image
trainingIterations = 50000  # number of training iterations
batchSize = 64  # examples processed per iteration
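# 50000 iterations * 64 examples is roughly 58 passes over the 55000-image training split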

# placeholders for the inputs X and the labels y
X = tf.placeholder(tf.float32, shape=[None, inputSize])
y = tf.placeholder(tf.float32, shape=[None, numClasses])
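# None leaves the batch dimension unspecified, so batches of any size can be fed in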

# parameter initialization
W1 = tf.Variable(tf.random_normal([inputSize, numClasses], stddev=0.1))  # 784*10 weights drawn from a normal distribution
B1 = tf.Variable(tf.constant(0.1, shape=[numClasses]))  # 10-dim bias vector initialized to 0.1

# build the model
y_pred = tf.nn.softmax(tf.matmul(X, W1) + B1)  # apply softmax to the linear scores X*W1 + B1
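# softmax(z)_i = exp(z_i) / sum_j exp(z_j) turns the 10 scores into class probabilities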

loss = tf.reduce_mean(tf.square(y - y_pred))  # mean squared error between predictions y_pred and labels y as the loss
opt = tf.train.GradientDescentOptimizer(learning_rate=.05).minimize(loss)  # optimize the parameters with gradient descent
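# each step applies W <- W - 0.05 * d(loss)/dW (and likewise for B1)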

correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))  # whether the predicted class matches the true class
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # fraction of correct predictions

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

for i in range(trainingIterations):
    batch = mnist.train.next_batch(batchSize)
    batchInput = batch[0]
    batchLabels = batch[1]
    _, trainingLoss = sess.run([opt, loss], feed_dict={X: batchInput, y: batchLabels})
    if i % 1000 == 0:
        train_accuracy = accuracy.eval(session=sess, feed_dict={X: batchInput, y: batchLabels})
        print("step %d, training accuracy %g" % (i, train_accuracy))

Running the original script (with the MSE loss) prints the training-batch accuracy every 1000 steps:

step 0, training accuracy 0.125
step 1000, training accuracy 0.515625
step 2000, training accuracy 0.75
step 3000, training accuracy 0.875
step 4000, training accuracy 0.828125
step 5000, training accuracy 0.90625
step 6000, training accuracy 0.84375
step 7000, training accuracy 0.875
step 8000, training accuracy 0.859375
step 9000, training accuracy 0.90625
step 10000, training accuracy 0.921875
step 11000, training accuracy 0.890625
step 12000, training accuracy 0.84375
step 13000, training accuracy 0.84375
step 14000, training accuracy 0.90625
step 15000, training accuracy 0.9375
step 16000, training accuracy 0.84375
step 17000, training accuracy 0.921875
step 18000, training accuracy 0.859375
step 19000, training accuracy 0.84375
step 20000, training accuracy 0.9375
step 21000, training accuracy 0.859375
step 22000, training accuracy 0.921875
step 23000, training accuracy 0.90625
step 24000, training accuracy 0.953125
step 25000, training accuracy 0.875
step 26000, training accuracy 0.890625
step 27000, training accuracy 0.828125
step 28000, training accuracy 0.84375
step 29000, training accuracy 0.90625
step 30000, training accuracy 0.890625
step 31000, training accuracy 0.953125
step 32000, training accuracy 0.84375
step 33000, training accuracy 0.875
step 34000, training accuracy 0.9375
step 35000, training accuracy 0.953125
step 36000, training accuracy 0.90625
step 37000, training accuracy 0.875
step 38000, training accuracy 0.890625
step 39000, training accuracy 0.90625
step 40000, training accuracy 0.921875
step 41000, training accuracy 0.859375
step 42000, training accuracy 0.890625
step 43000, training accuracy 0.921875
step 44000, training accuracy 0.90625
step 45000, training accuracy 0.9375
step 46000, training accuracy 0.890625
step 47000, training accuracy 0.84375
step 48000, training accuracy 0.921875
step 49000, training accuracy 0.921875
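
The accuracy logged above is computed on each 64-example training batch, which explains the fluctuation. To gauge generalization, the same accuracy op can be evaluated on the held-out test split in the same session, along these lines:

testAccuracy = sess.run(accuracy, feed_dict={X: mnist.test.images, y: mnist.test.labels})  # 10000 unseen test images
print("test accuracy %g" % testAccuracy)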

Reposted from blog.csdn.net/huhuandk/article/details/86240815