CIFAR-10 with a simply designed network: 6 convolutional layers, 2 fully connected layers, and 3 pooling layers, built by a beginner. Test accuracy is a disappointing 75% while training accuracy reaches 99%, so the model clearly overfits. Posted as a learning record; corrections are welcome.

The CIFAR-10 data-loading code is adapted from https://blog.csdn.net/RicardoSuzaku/article/details/77640647. Only the basic pipeline is implemented, with no data augmentation or other optimizations; the optimizer is AdamOptimizer. **Environment: TensorFlow 1.8, Python 2.7, Ubuntu 16.04**, running inside a TensorFlow venv virtual environment on a GTX 1060 GPU.
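Since no augmentation is used, that is likely a big part of the 99%-vs-75% gap. A rough sketch of what simple augmentation could look like (this block is my addition, not the original code; the pad/crop/flip choices are illustrative, and `x_image` refers to the NHWC tensor built later in the code):

```python
import tensorflow as tf

# Hypothetical augmentation sketch (not in the original post): pad to 40x40,
# random-crop back to 32x32, and randomly flip horizontally, per image.
def augment(image):  # image: a [32, 32, 3] float tensor
    image = tf.pad(image, [[4, 4], [4, 4], [0, 0]], mode='REFLECT')
    image = tf.random_crop(image, [32, 32, 3])
    image = tf.image.random_flip_left_right(image)
    return image

# x_image_aug = tf.map_fn(augment, x_image)  # apply per image during training only
```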

The code, section by section:

```python
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

import numpy as np
import pandas as pd
import tensorflow as tf
import cPickle

# Load the five training batches and collect them into one dict.
train_data = {b'data': [], b'labels': []}
for i in range(5):
    with open("cifar-10-python/cifar-10-batches-py/data_batch_" + str(i + 1), 'rb') as file:
        data = cPickle.load(file)
        train_data[b'data'] += list(data[b'data'])
        train_data[b'labels'] += data[b'labels']
```
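A side note, assuming you want to run this under Python 3 rather than the Python 2.7 the post targets: `cPickle` is gone there, and the CIFAR-10 pickles must be read with a bytes encoding, e.g.:

```python
# Python 3 equivalent of the loading code above: the standard pickle module
# needs encoding='bytes' to read the Python 2 pickles shipped with CIFAR-10.
import pickle

with open("cifar-10-python/cifar-10-batches-py/data_batch_1", 'rb') as f:
    batch = pickle.load(f, encoding='bytes')  # keys come back as bytes: b'data', b'labels'
```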

Load the test data:

```python
with open("cifar-10-python/cifar-10-batches-py/test_batch", 'rb') as file:
    test_data = cPickle.load(file)
```
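A quick sanity check (my addition) on what the loaded arrays should look like: CIFAR-10 ships 50,000 training and 10,000 test images, each flattened to 3072 values (1024 red, then 1024 green, then 1024 blue):

```python
# Expected shapes for the raw CIFAR-10 arrays.
print(np.array(train_data[b'data']).shape)   # (50000, 3072)
print(len(train_data[b'labels']))            # 50000
print(np.array(test_data[b'data']).shape)    # (10000, 3072)
```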

```python
def weight_variable(name, shape):
    # He-normal initialization; a plain tf.random_normal(shape, stddev=0.01)
    # also works but converges more slowly for ReLU networks.
    initial = tf.get_variable(name=name, shape=shape,
                              initializer=tf.keras.initializers.he_normal())
    return initial

def bias_variable(shape):
    initial = tf.constant(0.0001, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # 3x3 convolution, stride 1; SAME padding keeps the spatial size unchanged.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 halves the spatial size.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

x = tf.placeholder(tf.float32, [None, 3072])
y_ = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1, 3, 32, 32])       # reshape each 3072-vector to 3x32x32 (channels first)
x_image = tf.transpose(x_image, [0, 2, 3, 1])  # transpose to 32x32x3 (NHWC), the layout tf.nn.conv2d expects for 2-D convolution
```
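To see why the reshape/transpose pair is needed: each CIFAR-10 row stores all 1024 red values, then all green, then all blue, so it naturally reshapes to (3, 32, 32), while `conv2d` wants (32, 32, 3). A small NumPy illustration (my addition):

```python
# One fake "image" row laid out the CIFAR-10 way: 1024 R, 1024 G, 1024 B values.
row = np.concatenate([np.zeros(1024), np.ones(1024), 2 * np.ones(1024)])
chw = row.reshape(3, 32, 32)   # channels-first: chw[0] is the red plane
hwc = chw.transpose(1, 2, 0)   # channels-last: hwc[y, x] is an (R, G, B) pixel
print(hwc[0, 0])               # [0. 1. 2.] -- one pixel, all three channels
```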

First convolutional layer: 3x3x3 kernels, 32 of them; output 32x32x32 (SAME convolution).

```python
W_conv1 = weight_variable('w1', [3, 3, 3, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
```

Second convolutional layer: 3x3x32 kernels, 32 of them, followed by one pooling; output 16x16x32.

```python
W_conv2 = weight_variable('w2', [3, 3, 32, 32])
b_conv2 = bias_variable([32])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)   # 32x32 -> 16x16
```

Third layer: 3x3x32 kernels, 64 of them, followed by one pooling; output 8x8x64.

```python
W_conv3 = weight_variable('w3', [3, 3, 32, 64])
b_conv3 = bias_variable([64])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)   # 16x16 -> 8x8
```

Fourth layer: 3x3x64 kernels, 64 of them; output 8x8x64.

```python
W_conv4 = weight_variable('w4', [3, 3, 64, 64])
b_conv4 = bias_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
```

Fifth layer: 3x3x64 kernels, 128 of them; output 8x8x128.

```python
W_conv5 = weight_variable('w5', [3, 3, 64, 128])
b_conv5 = bias_variable([128])
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5) + b_conv5)
```

Sixth layer: 3x3x128 kernels, 128 of them, followed by one pooling; output 4x4x128.

```python
W_conv6 = weight_variable('w6', [3, 3, 128, 128])
b_conv6 = bias_variable([128])
h_conv6 = tf.nn.relu(conv2d(h_conv5, W_conv6) + b_conv6)
h_pool6 = max_pool_2x2(h_conv6)   # 8x8 -> 4x4
```
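As a check on the shape arithmetic above (my addition), the static shapes of the intermediate tensors can be printed right after building the graph:

```python
# Verify the spatial sizes claimed in the layer descriptions.
print(h_conv1.shape)   # (?, 32, 32, 32)
print(h_pool2.shape)   # (?, 16, 16, 32)
print(h_pool3.shape)   # (?, 8, 8, 64)
print(h_pool6.shape)   # (?, 4, 4, 128) -- matches the 4*4*128 input of fc1
```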

Fully connected layer 1:

```python
W_fc1 = weight_variable('w7', [4 * 4 * 128, 64])
b_fc1 = bias_variable([64])

flatten = tf.reshape(h_pool6, [-1, 4 * 4 * 128])
h_fc1 = tf.nn.relu(tf.matmul(flatten, W_fc1) + b_fc1)

# Dropout on the hidden layer; keep_prob is 0.5 during training, 1.0 at evaluation.
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
```

Fully connected layer 2:

```python
W_fc2 = weight_variable('w8', [64, 10])
b_fc2 = bias_variable([10])

y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2   # raw logits; softmax is applied inside the loss

# y_ is one-hot, so argmax recovers the integer class index the sparse loss expects.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy = tf.reduce_mean(cross_entropy)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

batch_size = 100
saver = tf.train.Saver()   # Saver for checkpointing the trained parameters
```
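Since `y_` is already one-hot, the argmax-then-sparse-loss round trip can be avoided; an equivalent formulation (my variation, not the post's code) uses the dense cross-entropy op directly:

```python
# Equivalent loss on one-hot labels, without converting to class indices first.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))
```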

Start training, and save the model parameters:

```python
with tf.Session() as sess:
    initial = tf.global_variables_initializer()
    sess.run(initial)

    # Normalize the 0-255 training data to the 0-1 range and convert the lists to NumPy arrays.
    x_train = np.array(train_data[b'data']) / 255

    # Turn the training labels into one-hot form and convert to a NumPy array.
    y_train = np.array(pd.get_dummies(train_data[b'labels']))

    # Normalize the 0-255 test data to the 0-1 range and convert to a NumPy array.
    x_test = np.array(test_data[b'data']) / 255

    # Turn the test labels into one-hot form and convert to a NumPy array.
    y_test = np.array(pd.get_dummies(test_data[b'labels']))

    for i in range(40000):
        # Cycle through the 50,000 training images: e.g. i = 499 gives start = 49900,
        # and i = 500 wraps back to start = 0.
        start = i * batch_size % 50000
        _, loss, acc = sess.run([train_step, cross_entropy, accuracy],
                                feed_dict={x: x_train[start: start + batch_size],
                                           y_: y_train[start: start + batch_size],
                                           keep_prob: 0.5})
        if i % 100 == 0:   # print training accuracy and loss
            print('step %d training accuracy %g, loss value is %g' % (i, acc, loss))
        if i % 1000 == 0:
            print("test accuracy %g" % accuracy.eval(feed_dict={x: x_test[0:1000], y_: y_test[0:1000], keep_prob: 1.0}))

    save_path = saver.save(sess, "./model/model.ckpt")   # where the trained parameters are stored
    print("Save to path:", save_path)
    # keep_prob = 1.0 keeps every unit: no dropout at evaluation time
    print("test accuracy %g" % accuracy.eval(feed_dict={x: x_test[1000:3000], y_: y_test[1000:3000], keep_prob: 1.0}))
```


Reposted from blog.csdn.net/m0_37192554/article/details/81381743