Training on Your Own Dataset with TensorFlow

Yesterday I implemented a simple CNN using the MNIST dataset. Although this dataset gets used everywhere, I had no idea how it is actually loaded during training, or how to swap in a dataset of my own.

Here is the reference I followed: MNIST数据集解析 (a post on parsing the MNIST dataset).

The author's code was written for Python 2.x; I'm on Python 3.5, so I fixed a few errors.

import numpy as np
import struct
import os
import matplotlib.pyplot as plt

import pickle
import gzip

_tag = '>' # big-endian byte order
_twoBytes = 'II' # format: two unsigned 32-bit integers
_fourBytes = 'IIII' # format: four unsigned 32-bit integers
_pictureBytes = '784B' # format of one image: 784 bytes (28*28)
_lableByte = '1B' # format of one label: a single byte
_msb_twoBytes = _tag + _twoBytes
_msb_fourBytes = _tag + _fourBytes
_msb_pictureBytes = _tag + _pictureBytes
_msb_lableByte = _tag + _lableByte

def getImage(filename = None):
	binfile = open(filename, 'rb') # open the file in binary mode
	buf = binfile.read() # read the whole file into a buffer
	binfile.close()
	index = 0 # offset into the buffer
	numMagic, numImgs, numRows, numCols = struct.unpack_from(_msb_fourBytes, buf, index)
	index += struct.calcsize(_msb_fourBytes)
	images = []
	for i in range(numImgs):
		imgVal = struct.unpack_from(_msb_pictureBytes, buf, index)
		index += struct.calcsize(_msb_pictureBytes)

		imgVal = list(imgVal)
		# optionally binarize the pixels:
		#for j in range(len(imgVal)):
		#	if imgVal[j] > 1:
		#		imgVal[j] = 1
		images.append(imgVal)
	return np.array(images)

def getlable(filename=None):
	binfile = open(filename, 'rb')
	buf = binfile.read() # read the whole file into a buffer
	binfile.close()
	index = 0 # offset into the buffer
	numMagic, numItems = struct.unpack_from(_msb_twoBytes, buf, index)
	index += struct.calcsize(_msb_twoBytes)
	labels = []
	for i in range(numItems):
		value = struct.unpack_from(_msb_lableByte, buf, index)
		index += struct.calcsize(_msb_lableByte)
		labels.append(value[0]) # unpack_from returns a tuple; take its single element
	return np.array(labels)

def outImg(arrX, arrY, order):
	# fetch the image and label at the given index from the dataset
	image = np.array(arrX[order])
	image = image.reshape(28, 28)
	label = arrY[order]
	print(label)
	outfile = str(order) + '_' + str(label) + '.png'
	plt.figure()
	plt.imshow(image, cmap="gray_r") # per the MNIST site: "Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black)."
	plt.show()
	#plt.savefig("./" + outfile) # save the figure instead of showing it

"""
The second method
"""
def load_data(filename=None):
	f = gzip.open(filename, 'rb')
	training_data, validation_data, test_data = pickle.load(f,encoding='bytes')
	return (training_data, validation_data, test_data)

def test_cPickle():
	filename = 'MNIST_data/mnist.pkl.gz'
	training_data, validation_data, test_data = load_data(filename)
	print(len(test_data)) # test_data is an (images, labels) tuple, so this prints 2
	outImg(training_data[0], training_data[1], 1000)
	#print(len(training_data[1]))

def test():
    trainfile_X = 'MNIST_data/train-images.idx3-ubyte'
    trainfile_y = 'MNIST_data/train-labels.idx1-ubyte'
    arrX = getImage(trainfile_X)
    arrY = getlable(trainfile_y)
    outImg(arrX, arrY, 1000)

if __name__ == '__main__':
    #test_cPickle() # test the second method
    test() # test the first method
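
For the second method, it helps to know what load_data() actually returns. Assuming the widely used mnist.pkl.gz bundle (50k train / 10k validation / 10k test), each of the three items is an (images, labels) tuple; a quick shape check:

training_data, validation_data, test_data = load_data('MNIST_data/mnist.pkl.gz')
train_x, train_y = training_data
print(train_x.shape)  # expected (50000, 784): one flattened 28x28 image per row
print(train_y.shape)  # expected (50000,): integer class labels 0-9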

Here is Baidu Baike's definition of a magic number:

For many file types, the first few bytes are fixed; since those bytes identify the file type, they are called the magic number. Separately, programmers also use "magic number" (or "magic string") for a numeric or string constant that appears in code without explanation.
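
For the MNIST files this gives a cheap sanity check: the image (idx3) file starts with the big-endian integer 2051 and the label (idx1) file with 2049. A minimal sketch, with paths assumed to match test() above:

import struct

with open('MNIST_data/train-images.idx3-ubyte', 'rb') as f:
    magic = struct.unpack('>I', f.read(4))[0]
print(magic == 2051)  # True for a valid MNIST image file

with open('MNIST_data/train-labels.idx1-ubyte', 'rb') as f:
    magic = struct.unpack('>I', f.read(4))[0]
print(magic == 2049)  # True for a valid MNIST label file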

Next up: training a classifier on my own images.

I looked through quite a few blog posts for reference, but they all had lots of errors that I couldn't get working.

The code below is based on the post tensorflow(六)训练分类自己的图片(CNN超详细入门版) (TensorFlow part 6: training a classifier on your own images, a detailed CNN intro).

I picked some of the raw images from the CK+ facial expression database as my dataset.
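
get_file() below expects one subfolder per expression class under the dataset root, like this:

datasets/
    angry/
    disgusted/
    fearful/
    happy/
    sadness/
    surprised/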

import os
import numpy as np
from PIL import Image
import tensorflow as tf
import matplotlib.pyplot as plt


def get_file(file_dir):
    angry = []
    label_angry = []
    disgusted = []
    label_disgusted = []
    fearful = []
    label_fearful = []
    happy = []
    label_happy = []
    sadness = []
    label_sadness = []
    surprised = []
    label_surprised = []

    for file in os.listdir(file_dir + '/angry'):
        angry.append(file_dir + '/angry' + '/' + file)
        label_angry.append(0)
    for file in os.listdir(file_dir + '/disgusted'):
        disgusted.append(file_dir + '/disgusted' + '/' + file)
        label_disgusted.append(1)
    for file in os.listdir(file_dir + '/fearful'):
        fearful.append(file_dir + '/fearful' + '/' + file)
        label_fearful.append(2)
    for file in os.listdir(file_dir + '/happy'):
        happy.append(file_dir + '/happy' + '/' + file)
        label_happy.append(3)
    for file in os.listdir(file_dir + '/sadness'):
        sadness.append(file_dir + '/sadness' + '/' + file)
        label_sadness.append(4)
    for file in os.listdir(file_dir + '/surprised'):
        surprised.append(file_dir + '/surprised' + '/' + file)
        label_surprised.append(5)

    print("There are %d angry\nThere are %d disgusted\nThere are %d fearful\n" %(len(angry), len(disgusted), len(fearful)),end="")
    print("There are %d happy\nThere are %d sadness\nThere are %d surprised\n" %(len(happy),len(sadness),len(surprised)))

    # np.hstack(tup) concatenates the tuples/lists/arrays in tup horizontally (column-wise)
    image_list = np.hstack((angry, disgusted, fearful, happy, sadness, surprised))
    label_list = np.hstack((label_angry, label_disgusted, label_fearful, label_happy, label_sadness, label_surprised))
    # stack into a 2-row matrix, transpose to one (path, label) row per sample, then shuffle the rows
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)

    image_list = list(temp[:, 0])   # column 0: image paths
    label_list = list(temp[:, 1])   # column 1: labels (as strings, since the array dtype is str)
    label_list = [int(i) for i in label_list]   # convert labels back to int

    return image_list, label_list
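
# Why shuffling this way keeps pairs intact (a standalone illustration, not part
# of the pipeline): temp is an N x 2 string array with one (path, label) row per
# sample, and np.random.shuffle() permutes whole rows only. For example:
#   temp = np.array([['a.jpg', '0'], ['b.jpg', '1'], ['c.jpg', '2']])
#   np.random.shuffle(temp)   # rows move as units, so 'b.jpg' keeps label '1'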


def get_batch(image, label, image_W, image_H, batch_size, capacity):
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # tf.cast() converts the Python lists to tensors of the given dtype

    input_queue = tf.train.slice_input_producer([image, label])
    # build an input queue that yields one (path, label) pair at a time

    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # decode_jpeg handles both .jpeg and .jpg; see the official docs for other formats

    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # crop or pad to image_W x image_H

    image = tf.image.per_image_standardization(image)
    # standardize the resized image (zero mean, unit variance)

    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=16, capacity=capacity)

    label_batch = tf.reshape(label_batch, [batch_size])
    # these two batch tensors are what gets fed into the network
    return image_batch, label_batch


if __name__ == '__main__':
    BATCH_SIZE = 5
    CAPACITY = 64
    IMG_W = 256
    IMG_H = 256

    train_dir = 'F:/Python/PycharmProjects/DeepLearning/datasets'

    image_list, label_list = get_file(train_dir)
    image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    with tf.Session() as sess:
        i = 0
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop() and i < 2:
                # pull two batches and visualize each image
                img, label = sess.run([image_batch, label_batch])

                for j in np.arange(BATCH_SIZE):
                    print('label: %d' % label[j])
                    # per_image_standardization makes pixels zero-mean, so imshow
                    # clips the values and the colors will look distorted
                    plt.imshow(img[j, :, :, :])
                    plt.show()
                i += 1
        except tf.errors.OutOfRangeError:
            print('done!')
        finally:
            coord.request_stop()
        coord.join(threads)

The next task is to hook up yesterday's CNN with today's input pipeline.
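
Roughly, the batches from get_batch() can be wired straight into the graph: since label_batch holds integer class ids, sparse softmax cross-entropy works without one-hot encoding. A minimal sketch, where inference() is a hypothetical stand-in for yesterday's CNN (not code from either referenced post):

logits = inference(image_batch)   # hypothetical: builds the CNN, returns [BATCH_SIZE, 6] class scores
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_batch, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
# run sess.run(tf.global_variables_initializer()) before training, then call
# _, loss_val = sess.run([train_op, loss]) inside the Coordinator loop above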

To be continued tomorrow…

Reposted from blog.csdn.net/jesmine_gu/article/details/81093686