MNIST 图片识别

首先,下载官网自带数据集:

# Download MNIST via TensorFlow's bundled helper (TF 1.x only; this module
# was removed in TF 2).  one_hot=True makes labels one-hot vectors.
# NOTE(review): "路径名" is a placeholder — replace with a real directory.
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets("路径名", one_hot=True)

接着将下载好的数据解码为图片:

from PIL import Image
import struct


def read_image(filename, save_prefix='保存路径'):
  """Decode an MNIST idx3-ubyte image file and save each image as a PNG.

  Args:
    filename: path to the idx3-ubyte file. Layout (big-endian): magic,
      image count, rows, columns, then rows*columns uint8 pixels per image.
    save_prefix: prefix for output files; image i goes to
      save_prefix + str(i) + '.png'.  Default keeps the original
      hard-coded location, so existing callers are unaffected.
  """
  # 'with' guarantees the file is closed even if parsing raises.
  with open(filename, 'rb') as f:
    buf = f.read()

  magic, images, rows, columns = struct.unpack_from('>IIII', buf, 0)
  index = struct.calcsize('>IIII')
  pixels = rows * columns

  for i in range(images):
    # frombytes builds the whole image in one C-level call; the original
    # issued one struct.unpack_from + putpixel per pixel, which is
    # hundreds of times slower for the same raster-order result.
    image = Image.frombytes('L', (columns, rows), buf[index:index + pixels])
    index += pixels

    print ('save ' + str(i) + 'image')
    image.save(save_prefix + str(i) + '.png')


def read_label(filename, saveFilename):
  """Decode an MNIST idx1-ubyte label file and write the labels as one
  comma-separated text line.

  Args:
    filename: path to the idx1-ubyte file. Layout (big-endian): magic,
      label count, then one uint8 per label.
    saveFilename: output text file path; receives e.g. "7,2,1\n".
  """
  # 'with' guarantees both files are closed even on error.
  with open(filename, 'rb') as f:
    buf = f.read()

  magic, labels = struct.unpack_from('>II', buf, 0)
  offset = struct.calcsize('>II')

  # One batch unpack replaces the original per-byte unpack loop.
  labelArr = list(struct.unpack_from('>' + 'B' * labels, buf, offset))

  with open(saveFilename, 'w') as save:
    save.write(','.join(map(str, labelArr)))
    save.write('\n')

  print ('save labels success')


if __name__ == '__main__':
  # Decode the MNIST test split in place: images to PNGs, labels to a
  # text file consumed by the training script below.
  read_image('./t10k-images.idx3-ubyte')
  read_label('./t10k-labels.idx1-ubyte', './label.txt')

然后这里我只训练了100张图片,迭代了100次,所以准确率可能不会很好:

#minist
import  tensorflow as tf
import numpy as np
import time
import os
import matplotlib.image as mgimg
from sklearn.preprocessing import OneHotEncoder
def read_label(path="./pic/test/label.txt"):
    """Read the comma-separated label file produced by the decoder script.

    NOTE(review): this redefines the earlier read_label(filename,
    saveFilename); if both halves of this post live in one module the
    second definition shadows the first — confirm that is intended.

    Args:
      path: label text file containing comma-separated digits.  Default
        keeps the original hard-coded location.

    Returns:
      list of int, one entry per label.
    """
    label = []
    with open(path, "r") as f:
        for line in f:
            # Split on commas instead of scanning character by character;
            # this also survives multi-digit values, which the original
            # int(one)-per-char loop would not.
            for token in line.strip().split(","):
                if token:
                    label.append(int(token))
    return label
def read_pic_data(path, i):
    """Load one decoded MNIST image via matplotlib.

    Args:
      path: filename prefix the PNGs were saved under.
      i: image index appended to the prefix.

    Returns:
      the pixel array from mgimg.imread (28*28 for these files).
    """
    # The original assigned a throwaway empty list to `data` first;
    # imread's result is all that is needed.
    full_path = path + '%d' % i + ".png"
    return mgimg.imread(full_path)
# ---- Load labels and images prepared by the decoder script ----
label1 = read_label()
label = np.array(label1).reshape(-1, 1)
# BUG FIX: ndarray.astype returns a NEW array; the original called it as a
# statement and discarded the result, leaving the data untouched.
label = label.astype("float32")
# One-hot encode the digit labels; todense() gives a [N, 10] matrix.
label = OneHotEncoder().fit_transform(label).todense()

# NOTE(review): "路径" is a placeholder prefix — set it to where the PNGs
# were saved.
path = "路径"


def _load_range(start, stop):
    # Stack images [start, stop) into a [N, 28, 28, 1] float32 batch.
    imgs = [read_pic_data(path, i) for i in range(start, stop)]
    return np.array(imgs).astype("float32").reshape(-1, 28, 28, 1)


data = _load_range(0, 100)      # training images 0..99
data1 = _load_range(100, 200)   # held-out images 100..199

# Graph inputs: image batch and one-hot ground-truth labels.
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
y_actual = tf.placeholder(tf.float32, shape=[None, 10])
# ---- Network: two conv+pool stages, one hidden FC layer, softmax output ----
# Conv 1: 5x5 kernels, 1 input channel -> 32 feature maps.
w1 = tf.Variable(tf.truncated_normal(shape=[5, 5, 1, 32], stddev=0.1))
b1 = tf.Variable(tf.constant(0.1, shape=[32]))
# Convolution + ReLU activation.
conv1 = tf.nn.relu(tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='SAME') + b1)
# 2x2 max pooling: 28x28 -> 14x14.
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Conv 2: 32 -> 64 feature maps.
w2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 32, 64], stddev=0.1))
b2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.relu(tf.nn.conv2d(pool1, w2, strides=[1, 1, 1, 1], padding='SAME') + b2)
# 14x14 -> 7x7.
pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Fully connected layer 1: flatten 7*7*64 -> 1024.
w_f1 = tf.Variable(tf.truncated_normal(shape=[7 * 7 * 64, 1024], stddev=0.1))
b_f1 = tf.Variable(tf.constant(0.1, shape=[1024]))
h_pool1 = tf.reshape(pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool1, w_f1) + b_f1)

# Output layer: 1024 -> 10 logits; `pre` keeps the softmax probabilities
# the rest of the script reads.
w_f2 = tf.Variable(tf.truncated_normal(shape=[1024, 10], stddev=0.1))
b_f2 = tf.Variable(tf.constant(0.1, shape=[10]))
logits = tf.matmul(h_fc1, w_f2) + b_f2
pre = tf.nn.softmax(logits)

# BUG FIX: the original computed -sum(y * log(softmax)) directly, which
# produces NaN as soon as any softmax output underflows to 0.  Computing
# the cross-entropy from the logits is the numerically stable equivalent.
loss = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_actual, logits=logits))
# Adam optimizer, learning rate 1e-3 (the original comment said "gradient
# descent" but the code always used Adam).
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)

# Accuracy: fraction of samples whose arg-max prediction matches the label.
cor = tf.equal(tf.argmax(pre, 1), tf.argmax(y_actual, 1))
accuracy = tf.reduce_mean(tf.cast(cor, tf.float32))

def train():
    """Train on the 100 loaded images for 100 epochs, then checkpoint.

    Reads the module-level `data` / `label` tensors; saves the model to
    ./train2 after the last epoch.
    """
    # One Saver is enough — the original constructed a second Saver after
    # initialization and silently discarded the first.
    saver = tf.train.Saver(max_to_keep=0)
    model_path = r"./train2"
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Hoist the feed dict: it is identical for every epoch.
        feed = {x: data, y_actual: label[0:100]}
        for epoch in range(100):
            # Don't name the result `train` — in the original that local
            # shadowed this very function.
            sess.run(train_step, feed_dict=feed)
            if epoch % 10 == 0:
                res = sess.run(accuracy, feed_dict=feed)
                print("epoch=", epoch, " module:", res)
            if epoch == 99:
                # (The original also ran `pre` here and ignored the result.)
                save_path = saver.save(sess, model_path)
                print("epoch=100", epoch)
def test():
    """Restore the latest checkpoint and print predicted vs. true label
    for the held-out images 100..199.

    Expects a checkpoint in the current directory (run train() first).
    """
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint("."))
        output = sess.run(pre, feed_dict={x: data1})
        # np.argmax replaces the original manual copy-to-list /
        # lit.index(max(lit)) scan over each probability row.
        for offset, probs in enumerate(output):
            print(int(np.argmax(probs)), " ", label1[100 + offset])
        
# Toggle: run train() once to produce the ./train2 checkpoint, then
# switch to test() to evaluate the held-out images.
#train()
test()

猜你喜欢

转载自blog.csdn.net/by_side_with_sun/article/details/79951297
今日推荐