TensorFlow CNN training example

# -*- coding: utf-8 -*-

from skimage import io,transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
import cv2

print(tf.__version__)
print(tf.__path__)

path='../images/blackImg/train/'
# print(os.listdir(path))

#resize all images to w*h = 64*96, 3 channels
w=64
h=96
c=3

#zero-center the data, then scale it into [-1, 1]
def normalize(data):
    m = np.mean(data)
    data = data - m
    max_val = data.max()
    min_val = data.min()
    val = max(abs(max_val), abs(min_val))
    data = data / float(val)
    return data
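# For example: normalize(np.array([0., 128., 255.])) zero-centers around the
# mean (~127.7) and scales by the largest magnitude, giving roughly
# [-1.0, 0.003, 0.997].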

#read the images
def read_img(path):
    cates=[x for x in os.listdir(path) if os.path.isdir(path+x)]
    print(cates)
    imgs=[]
    labels=[]
    for tag in cates:
        for im in glob.glob(os.path.join(path,tag,"*.jpg")):
            image = cv2.imread(im)
            image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)
            
#             #optionally normalize the images here
#             image = normalize(image)
#             image = np.reshape(image,(h,w,1))
            imgs.append(image)
            labels.append(int(tag))
    
    return np.asarray(imgs,np.float32),np.asarray(labels,np.int32)
data,label=read_img(path)
print(data.shape)
print(label.shape)

#shuffle the samples
num_example=data.shape[0]
arr=np.arange(num_example)
np.random.shuffle(arr)
data=data[arr]
label=label[arr]

#split all the data into training and validation sets
ratio=0.8
s=int(num_example*ratio)
x_train=data[:s]
y_train=label[:s]
x_val=data[s:]
y_val=label[s:]

#-----------------build the network----------------------
#placeholders
x=tf.placeholder(tf.float32,shape=[None,h,w,c],name='x')
print(x)
y_=tf.placeholder(tf.int32,shape=[None,],name='y_')

#first conv + pool block (96*64 -> 24*16)
conv1=tf.layers.conv2d(
      inputs=x,
      filters=2,
      kernel_size=[8, 8],
      padding="same",
      activation=tf.nn.relu,
      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print "conv1:",conv1.shape
pool1=tf.layers.max_pooling2d(inputs=conv1, pool_size=[8, 8], padding="same",strides=4)
print "pool1:",pool1.shape

#second conv + pool block (24*16 -> 6*4)
conv2=tf.layers.conv2d(
      inputs=pool1,
      filters=4,
      kernel_size=[6, 6],
      padding="same",
      activation=tf.nn.relu,
      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print "conv2:",conv2.shape
pool2=tf.layers.max_pooling2d(inputs=conv2, pool_size=[6, 6], padding="same",strides=4)
print "pool2:",pool2.shape
#第二个卷积层(6*4->3*2)
conv3=tf.layers.conv2d(
      inputs=pool2,
      filters=2,
      kernel_size=[3, 3],
      padding="same",
      activation=tf.nn.relu,
      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print "conv3:",conv3.shape
pool3=tf.layers.max_pooling2d(inputs=conv3, pool_size=[3, 3], padding="same",strides=2)


print "pool3:",pool3.shape
re1 = tf.reshape(pool3, [-1, 3 * 2 * 2])
print "rel:",re1.shape

#fully connected layers
dense1 = tf.layers.dense(inputs=re1, 
                      units=32, 
                      activation=tf.nn.relu,
                      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                      kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
print "dense1:",dense1.shape

logits= tf.layers.dense(inputs=dense1, 
                        units=2, 
                        activation=None,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
print "logits:",logits.shape
#---------------------------end of the network---------------------------

#identity multiply so the output tensor carries a fixed name for later restoring
b = tf.constant(value=1,dtype=tf.float32)
logits_eval = tf.multiply(logits,b,name='logits_eval')

loss=tf.losses.sparse_softmax_cross_entropy(labels=y_,logits=logits)
train_op=tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_)    
acc= tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
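# sparse_softmax_cross_entropy takes the integer labels directly (no one-hot
# encoding needed); accuracy is the mean over the batch of
# argmax(logits) == label.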

#helper that yields the data in mini-batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
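# Usage: minibatches(x_train, y_train, 32, shuffle=True) yields (images, labels)
# pairs of 32 samples each; a trailing partial batch smaller than batch_size
# is dropped.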

#train and validate; n_epoch can be set larger

n_epoch=10000
batch_size=320
modelDir = 'ckpt_8'
if not os.path.isdir(modelDir):
    os.makedirs(modelDir)


# tf.reset_default_graph()  
sess=tf.InteractiveSession()  
sess.run(tf.global_variables_initializer())

saver=tf.train.Saver(max_to_keep=1)

max_acc=0
f=open('%s/acc.txt'%modelDir,'w')

for epoch in range(n_epoch):
    start_time = time.time()
    
    #training
    train_loss, train_acc, n_batch = 0, 0, 0
    print()
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _,err,ac=sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("   train loss: %f" % (train_loss/ n_batch))
    print("   train acc: %f" % (train_acc/ n_batch))
    
    #validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss,acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (val_loss/ n_batch))
    print("   validation acc: %f" % (val_acc/ n_batch))
    
    #use the mean loss/acc over batches for logging and checkpointing
    val_loss /= n_batch; val_acc /= n_batch
    print('epoch:%d, val_loss:%f, val_acc:%f'%(epoch,val_loss,val_acc))
    f.write(str(epoch+1)+', val_acc: '+str(val_acc)+'\n')
    if val_acc>max_acc:
      max_acc=val_acc
      saver.save(sess,'%s/block.ckpt'%modelDir,global_step=epoch+1)
f.close()

sess.close()
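To run inference later, the checkpoint can be restored through the tensors named 'x' and 'logits_eval' above. A minimal sketch, assuming a saved meta file and a test image path (both hypothetical here):

# -*- coding: utf-8 -*-
import cv2
import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    # Restore the graph definition and the most recent weights.
    # 'block.ckpt-1.meta' is an assumed filename; use whichever step was saved.
    saver = tf.train.import_meta_graph('ckpt_8/block.ckpt-1.meta')
    saver.restore(sess, tf.train.latest_checkpoint('ckpt_8'))
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')
    logits_eval = graph.get_tensor_by_name('logits_eval:0')

    # Preprocess exactly as in training: BGR load, resize to w*h = 64*96.
    img = cv2.imread('test.jpg')  # hypothetical test image
    img = cv2.resize(img, (64, 96), interpolation=cv2.INTER_LINEAR)
    batch = np.asarray([img], np.float32)

    pred = sess.run(logits_eval, feed_dict={x: batch})
    print('predicted class:', np.argmax(pred, axis=1)[0])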

Reposted from blog.csdn.net/sparrowwf/article/details/84146524