Processing the Introduction to Artificial Intelligence Datasets in Python

Introduction to Artificial Intelligence — Assignment 3

1.RNN

Dataset: datasets/sohu.csv

Model: Keras Sequential model

Procedure:

  • Data loading and word segmentation
import pandas as pd
import jieba
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

# Load the dataset (assumed to be a CSV with 'text' and 'label' columns)
data = pd.read_csv('datasets/sohu.csv')

# Segment each text with jieba and re-join the tokens with spaces
data['text'] = data['text'].apply(lambda x: ' '.join(jieba.cut(x)))

# Keep the 10,000 most frequent words and map each text to a sequence of word indices
max_features = 10000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(data['text'])
list_tokenized_train = tokenizer.texts_to_sequences(data['text'])

# Pad or truncate every sequence to a fixed length of 100
maxlen = 100
x = pad_sequences(list_tokenized_train, maxlen=maxlen)
y = data['label']
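For reference, pad_sequences zero-pads (on the left by default) or truncates every sequence to exactly maxlen entries, for example:

print(pad_sequences([[3, 8, 2]], maxlen=5))   # -> [[0 0 3 8 2]]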
  • Split into training and test sets
from sklearn.model_selection import train_test_split
from keras.utils import np_utils

x1_train, x1_test, y1_train, y1_test = train_test_split(x, y, test_size=0.3)
y1_train_onehot = np_utils.to_categorical(y1_train)   # convert labels to one-hot matrices
y1_test_onehot = np_utils.to_categorical(y1_test)
  • Build and train the model
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, LSTM, GlobalMaxPool1D, Dense, Dropout

# Hyperparameters (values assumed; the original write-up does not state them)
embed_size = 128
batch_size = 32
epochs = 5

model = Sequential()
model.add(Embedding(max_features, embed_size))
model.add(Bidirectional(LSTM(32, return_sequences=True)))
model.add(GlobalMaxPool1D())
model.add(Dense(20, activation="relu"))
model.add(Dropout(0.05))
# Note: for a single-label 3-class problem, softmax with categorical_crossentropy is the
# more conventional choice; the original's sigmoid + binary_crossentropy is kept as-is.
model.add(Dense(3, activation="sigmoid"))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x1_train, y1_train_onehot, batch_size=batch_size, epochs=epochs, validation_split=0.2)
  • Evaluate the model
accuracy = model.evaluate(x1_test, y1_test_onehot, batch_size = 50)
print("test accuracy:{}".format(accuracy[1]))
>>>
1271/1271 [==============================] - 1s 572us/step
test accuracy:0.9645948123969401
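
As a quick usage check (not part of the original write-up), the fitted tokenizer and trained model can classify a new headline; the sample text and variable names below are purely illustrative:

new_text = '北京举办人工智能大会'   # illustrative headline
seq = pad_sequences(tokenizer.texts_to_sequences([' '.join(jieba.cut(new_text))]), maxlen=maxlen)
print("predicted class:", model.predict(seq).argmax(axis=-1)[0])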

2.CNN

Dataset: datasets/cifar-10-batches-py

Model: Keras Sequential model

Procedure:

  1. Data preprocessing
import pickle
import numpy as np
from keras.utils import np_utils

def unpickle(file):
    """Load one CIFAR-10 batch file into a dict keyed by bytes."""
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch

# Training batch files
train_data_list, train_label_list = [], []

train_files = ['data_batch_' + str(i) for i in range(1, 6)]
for f in train_files:
    fpath = 'datasets/cifar-10-batches-py/' + f
    batch_dict = unpickle(fpath)
    # Each batch dict carries the batch label (which of the 5 training batches it is),
    # the image labels (0-9), and a 10000 x 3072 array of raw pixel data.
    batch_data = batch_dict[b'data']
    batch_labels = batch_dict[b'labels']
    train_data_list.append(batch_data)
    train_label_list.append(batch_labels)

X_train = np.concatenate(train_data_list, axis=0)
y_train = np.concatenate(train_label_list, axis=0)

# Test batch file
test_batch = unpickle('datasets/cifar-10-batches-py/test_batch')
X_test = np.array(test_batch[b'data'])
y_test = np.array(test_batch[b'labels'])  # originally a plain list

# Scale pixels to [0, 1] and one-hot encode the labels
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
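
Each CIFAR-10 row is a flat vector of 3072 values (1024 red, 1024 green, 1024 blue), but Conv2D expects image tensors. The original listing does not show this reshaping step, so the sketch below assumes the standard channel-first storage order and converts to the channels-last layout used by the model:

X_train = X_train.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
X_test = X_test.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
print(X_train.shape, X_test.shape)   # (50000, 32, 32, 3) (10000, 32, 32, 3)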
  2. Model construction, compilation, training, and testing
# Model construction and compilation
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout, Flatten, Dense

num_classes = 10  # CIFAR-10 has 10 classes

def base_model(opt):
    model = Sequential()

    # 32 convolution kernels (feature maps), stride 1; "same" padding keeps the feature-map size
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=X_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    # The original listing created this pooling layer (with channels_first for a Theano backend)
    # but never added it to the model; it is added here with the default channels_last layout.
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # 64 convolution kernels
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())              # Flatten layer
    model.add(Dense(512))             # fully connected layer with 512 units
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(num_classes))     # fully connected output layer with 10 units
    model.add(Activation('softmax'))  # softmax activation function

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])  # track accuracy during training
    return model

# Initialize the RMSprop optimizer
opt1 = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
# Initialize the Adam optimizer
# opt2 = keras.optimizers.Adam(lr=0.0001)

# Train the model with RMSprop
cnn2 = base_model(opt1)
cnn2.summary()  # print the network structure and parameter counts

# Train for 100 epochs with batch size 32; dropout and batch normalization in the model
# act as regularization. Only the first 1000 training / 200 test samples are used here.
history = cnn2.fit(X_train[:1000], y_train[:1000],
                   epochs=100, batch_size=32,
                   validation_data=(X_test[:200], y_test[:200]),
                   shuffle=True)
score2 = cnn2.evaluate(X_test, y_test)
print("Loss: {0:.2f}, accuracy: {1:.2%}".format(score2[0], score2[1]))
  3. Model evaluation
import matplotlib.pyplot as plt

def plot_loss_and_accuracy(history):
    # Plots of the training / validation accuracy and loss curves

    plt.figure(0)
    plt.plot(history.history['acc'], 'r')
    plt.plot(history.history['val_acc'], 'g')
    plt.xticks(np.arange(0, 101, 20))
    plt.rcParams['figure.figsize'] = (10, 8)
    plt.xlabel("Num of Epochs")
    plt.ylabel("Accuracy")
    plt.title("Training Accuracy vs Validation Accuracy")
    plt.legend(['train', 'validation'])

    plt.figure(1)
    plt.plot(history.history['loss'], 'r')
    plt.plot(history.history['val_loss'], 'g')
    plt.xticks(np.arange(0, 101, 20))
    plt.rcParams['figure.figsize'] = (10, 8)
    plt.xlabel("Num of Epochs")
    plt.ylabel("Loss")
    plt.title("Training Loss vs Validation Loss")
    plt.legend(['train', 'validation'])
    plt.show()

plot_loss_and_accuracy(history)


from sklearn.metrics import classification_report, confusion_matrix
Y_pred = cnn2.predict(X_test, verbose=2)
y_pred = np.argmax(Y_pred, axis=1)

cm = confusion_matrix(np.argmax(y_test,axis=1),y_pred)

# Visualization of the confusion matrix
import seaborn as sn
import pandas as pd

df_cm = pd.DataFrame(cm, range(10), range(10))
plt.figure(figsize=(10, 7))
sn.set(font_scale=1.4)  # label size
sn.heatmap(df_cm, annot=True, annot_kws={"size": 12})  # annotation font size
plt.show()
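
classification_report is imported above but never used; it can print per-class precision, recall and F1 scores from the same predictions:

print(classification_report(np.argmax(y_test, axis=1), y_pred))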

3.GAN

Dataset: datasets/mnist

Model: TensorFlow 1.x computation graph (tf.layers / tf.Session), not a Keras Sequential model

Procedure:

  • Dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('datasets/mnist')  # read path assumed from the dataset noted above
  • Generator and discriminator
import pickle
import numpy as np
import tensorflow as tf

# Generator
def get_generator(noise_img, n_units, out_dim, reuse=False, alpha=0.01):
    with tf.variable_scope("generator", reuse=reuse):
        hidden1 = tf.layers.dense(noise_img, n_units)
        hidden1 = tf.maximum(alpha * hidden1, hidden1)   # leaky ReLU
        hidden1 = tf.layers.dropout(hidden1, rate=0.2)
        logits = tf.layers.dense(hidden1, out_dim)
        outputs = tf.tanh(logits)                        # outputs lie in (-1, 1)
        return logits, outputs

# Discriminator
def get_discriminator(img, n_units, reuse=False, alpha=0.01):
    with tf.variable_scope("discriminator", reuse=reuse):
        hidden1 = tf.layers.dense(img, n_units)
        hidden1 = tf.maximum(alpha * hidden1, hidden1)   # leaky ReLU

        logits = tf.layers.dense(hidden1, 1)
        outputs = tf.sigmoid(logits)                     # probability that the input is real

        return logits, outputs
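
The training loop in the next step refers to placeholders, loss tensors, optimizers and bookkeeping variables that the original write-up does not show. A minimal sketch of that missing wiring is given below; all hyperparameter values (img_size, noise_size, g_units, d_units, learning_rate, batch_size, epochs, n_sample) are assumed, not taken from the original:

# Hyperparameters (values assumed)
img_size = 784          # flattened 28x28 MNIST images
noise_size = 100        # dimension of the generator's input noise
g_units = 128           # hidden units in the generator
d_units = 128           # hidden units in the discriminator
learning_rate = 0.001
batch_size = 64
epochs = 300
n_sample = 25
samples, losses = [], []

# Placeholders for real images and input noise
real_img = tf.placeholder(tf.float32, [None, img_size], name='real_img')
noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')

# Build the generator, then run the discriminator on real and generated images
g_logits, g_outputs = get_generator(noise_img, g_units, img_size)
d_logits_real, d_outputs_real = get_discriminator(real_img, d_units)
d_logits_fake, d_outputs_fake = get_discriminator(g_outputs, d_units, reuse=True)

# Discriminator loss: real images should score 1, generated images 0
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
# Generator loss: make the discriminator score generated images as 1
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))

# Each network is optimized only over its own variables
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith('generator')]
d_vars = [var for var in train_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)

saver = tf.train.Saver(var_list=g_vars)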
  • Training
# Start training
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for batch_i in range(mnist.train.num_examples // batch_size):
            batch = mnist.train.next_batch(batch_size)
            batch_images = batch[0].reshape((batch_size, 784))
            # Scale the pixels to (-1, 1) because the generator's tanh output lies in that range;
            # real and fake images share the discriminator's parameters.
            batch_images = batch_images * 2 - 1
            # Input noise for the generator
            batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
            # Run the optimizers
            _ = sess.run(d_train_opt, feed_dict={real_img: batch_images, noise_img: batch_noise})
            _ = sess.run(g_train_opt, feed_dict={noise_img: batch_noise})

        # Compute the losses at the end of each epoch
        train_loss_d = sess.run(d_loss,
                                feed_dict={real_img: batch_images, noise_img: batch_noise})
        # Loss on real images
        train_loss_d_real = sess.run(d_loss_real,
                                     feed_dict={real_img: batch_images, noise_img: batch_noise})
        # Loss on fake images
        train_loss_d_fake = sess.run(d_loss_fake,
                                     feed_dict={real_img: batch_images, noise_img: batch_noise})
        # Generator loss
        train_loss_g = sess.run(g_loss, feed_dict={noise_img: batch_noise})
        print("Epoch {}/{}...".format(e + 1, epochs),
              "Discriminator Loss: {:.4f}(Real: {:.4f} + Fake: {:.4f})...".format(train_loss_d, train_loss_d_real, train_loss_d_fake),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Record the loss values
        losses.append((train_loss_d, train_loss_d_real, train_loss_d_fake, train_loss_g))
        # Draw samples to inspect later
        sample_noise = np.random.uniform(-1, 1, size=(n_sample, noise_size))
        gen_samples = sess.run(get_generator(noise_img, g_units, img_size, reuse=True),
                               feed_dict={noise_img: sample_noise})
        samples.append(gen_samples)
        # Save checkpoints
        saver.save(sess, './checkpoints/generator.ckpt')

# Save the sampled generator outputs to disk
with open('train_samples.pkl', 'wb') as f:
    pickle.dump(samples, f)
  • Loss curves
fig, ax = plt.subplots(figsize=(20,7))
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator Total Loss')
plt.plot(losses.T[1], label='Discriminator Real Loss')
plt.plot(losses.T[2], label='Discriminator Fake Loss')
plt.plot(losses.T[3], label='Generator')
plt.title("Training Losses")
plt.legend()

  • Results
def view_samples(epoch, samples):
    """
    epoch: which training epoch's images to show
    samples: the sampled generator outputs collected during training
    """
    fig, axes = plt.subplots(figsize=(7, 7), nrows=5, ncols=5, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch][1]):  # samples[epoch][1] are the generated images; [0] are the corresponding logits
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        im = ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
    return fig, axes

# Load the generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_noise = np.random.uniform(-1, 1, size=(25, noise_size))
    gen_samples = sess.run(get_generator(noise_img, g_units, img_size, reuse=True),
                           feed_dict={noise_img: sample_noise})

_ = view_samples(-1, samples)  # show the outputs of the last epoch

over


Reposted from blog.csdn.net/newlw/article/details/125008672