Tsinghua AI Self-Improvement Program (清华AI自强计划), Assignment 3, Part 1

Copyright notice. Author: 巴伐利亚酒仙, https://blog.csdn.net/weixin_39062811/article/details/86652729

On the "Incompatible shapes: [30,784] vs. [784,1]" error

While working today on the assignment of training a simple classifier on the MNIST dataset, I ran into this error. It took a while to track down the mistake, so I am writing up the takeaway here.

Assignment 3, Part 1

Below is the source code before the error was fixed.

from tensorflow.examples.tutorials.mnist import input_data   # import the input_data helper used later to download/read the MNIST data
import tensorflow as tf # import TensorFlow under the usual alias tf
import numpy as np  # import NumPy under the usual alias np

#Basic hyperparameter settings
batchSize = 30   #batch size: number of images fed to the model per training step
lr = 0.005       #learning rate; if the learning rate decay strategy below is enabled, this is its initial value
iter = 1000000   #number of training iterations
saveInter = 100  #saving frequency: every 100 iterations, save the model parameters and record the model's performance
sample_size = 55000  #total number of training examples; MNIST is officially described as 60000 images, but it is actually split into 55000 (train) + 5000 (validation), and only the 55000 training images are used here

# Threshold the model outputs: > 0.5 counts as "positive", otherwise "negative"
def predict(X):   # prediction helper that thresholds the sigmoid outputs
    num = X.shape[0]  # number of rows (samples) in X
    result = [] # empty list; the per-sample labels are appended below
    for i in range(num):  # loop over samples 0, 1, ..., num-1
        if X[i]>0.5: # output greater than 0.5
            result.append(1.0) # label the sample as positive
        else: # output of 0.5 or less
            result.append(0.0)  # label the sample as negative
    return result # return the list of predicted labels
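# Note: the loop above could also be written as a single vectorized NumPy comparison,
# e.g. result = (np.asarray(X).reshape(-1) > 0.5).astype(np.float64).tolist()
# (an equivalent sketch, not part of the original assignment code).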

# Load the dataset; it is recommended to download MNIST from the official site in advance and extract it into the folder referenced below
# MNIST download page: http://yann.lecun.com/exdb/mnist/
def loadData(): # load the MNIST dataset
    file = "../MNIST" # directory holding the MNIST files
    mnist = input_data.read_data_sets(file, one_hot=True)  # read the data with input_data.read_data_sets, using one-hot labels
    return mnist # return the loaded dataset mnist

# Create placeholders for the model inputs and outputs
def create_placeholder(n_x=784,n_y=0): # create the input/output placeholders
    X = tf.placeholder(tf.float32,shape=[None,n_x],name='X')   # X: flattened 28x28 images, shape [None, 784]
    Y = tf.placeholder(tf.float32, shape=[None,], name='Y')  # Y: binary labels for the current digit, shape [None]
    return X,Y  # return the placeholders X and Y

# Define the parameters W and b
def initialize_parameters(): # create the trainable parameters
    W = tf.Variable(tf.zeros([784,1]))  # weight matrix W with shape [784,1], initialized to zeros
    b = tf.Variable(tf.zeros([1,1]))  # bias b with shape [1,1], initialized to zeros
    parameters={'W': W,  # weights W
                'b': b}  # bias b
    return parameters  # return the parameter dictionary

# Convert labels to one-hot form; unused in this example because read_data_sets already returns one-hot labels
def convert_one_hot(Y,C):  # convert integer labels Y into a one-hot matrix with C classes
    one_hot=np.eye(C)[Y.reshape(-1)].T  # index rows of the CxC identity matrix by label, then transpose
    return one_hot  # return the one-hot matrix
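# For example (an illustrative call, not used anywhere in this script):
#   convert_one_hot(np.array([1, 0, 2]), 3)
# picks rows 1, 0 and 2 of the 3x3 identity matrix and transposes the result,
# giving a 3x3 array whose columns are the one-hot vectors for labels 1, 0 and 2.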

# Define the network model
def forward_propagation(X,parameters):  # forward pass of the single-layer model
    W = parameters['W']  # weights W
    b = parameters['b']  # bias b

    Z1=(X*W)+b # intended to compute Z1 = X*W + b; this elementwise product is the line that triggers the shape error discussed below
    A1=tf.nn.sigmoid(Z1)  # A1 = sigmoid(Z1)
    A1 = tf.clip_by_value(A1,1e-3,1.0)  # clip A1 into [0.001, 1.0] so that values close to 0 do not produce nan inside tf.log
    return A1 # return A1

# Define the loss function
def compute_cost(y_,y,W):  # weighted cross-entropy cost with L2 regularization
    # The cross entropy below is slightly modified: the (1.0-y_)*tf.log(1.0-y) term is scaled by 0.1 because
    # the positive/negative sample ratio is roughly 1:9, heavily skewed towards negatives
    # An L2 regularization term is also added; it can be removed for comparison
    cross_entropy = -(1.0/batchSize)*tf.reduce_sum(y_*tf.log(y)+0.1*(1.0-y_)*tf.log(1.0-y)+tf.contrib.layers.l2_regularizer(0.1)(W)) # weighted cross entropy plus L2 penalty, summed and averaged over the minibatch
    return cross_entropy   # return the cost tensor cross_entropy
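# The intended loss, written out (m = batchSize, a_i the clipped sigmoid output, y_i the binary label):
#   cost = -(1/m) * sum_i [ y_i*log(a_i) + 0.1*(1-y_i)*log(1-a_i) ]  plus the L2 penalty on W,
# where the 0.1 factor down-weights the negative class, which outnumbers the positive class roughly 9:1.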

# Build, train, and save the model
def model(mnist,Num): # train a binary classifier for digit Num
    x,y_ = create_placeholder(784, 0) # call create_placeholder to create the placeholders x, y_
    parameters = initialize_parameters() # call initialize_parameters to create the parameters
    A1 = forward_propagation(x, parameters)   # call forward_propagation to build the forward pass

    # Learning rate decay: the learning rate shrinks exponentially with the iteration count,
    # following decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
    global_step = tf.Variable(0)  # iteration counter driving the decay schedule
    learning_rate = tf.train.exponential_decay(lr,global_step,decay_steps=sample_size/batchSize,decay_rate=0.98,staircase=True) # exponentially decaying learning rate via tf.train.exponential_decay
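    # For example, with lr = 0.005, decay_steps = 55000/30 ≈ 1833 and staircase=True, the learning
    # rate stays at 0.005 for roughly the first 1833 steps, then drops to 0.005*0.98 = 0.0049,
    # then to 0.005*0.98^2 ≈ 0.004802 after about 3666 steps, and so on.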
    
    cost = compute_cost(y_, A1,parameters['W']) # call compute_cost to build the loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost,global_step=global_step) # gradient descent training op via tf.train.GradientDescentOptimizer
    sess = tf.InteractiveSession()   # create the session with tf.InteractiveSession()
    sess.run(tf.global_variables_initializer()) # run tf.global_variables_initializer() to initialize the variables
    
    # The full training set is used to test the model
    testbatchX = mnist.train.images  # all training images from the mnist data
    testbatchY = mnist.train.labels  # all training labels from the mnist data

    modelLast = []  # name of the last saved model (set if training stops early)
    logName = "./log"+str(Num)+".txt" # log file "log"+str(Num)+".txt" for this digit

    # Model saver, keeping at most the 4 most recent checkpoints
    saver = tf.train.Saver(max_to_keep=4)  # create the checkpoint saver with tf.train.Saver
    pf = open(logName, "w") # open the log file for writing
    for i in range(iter): # main training loop over iter iterations
        # Load a minibatch of batchSize training samples
        batch = mnist.train.next_batch(batchSize) # fetch the next minibatch via mnist.train.next_batch
        batchX = batch[0] # minibatch images (first element of batch)
        batchY = batch[1] # minibatch one-hot labels (second element of batch)
        # Run one training step
        train_step.run(feed_dict={x:batchX, y_:batchY[:,Num]})  # run the graph, feeding the images x: batchX and the binary labels y_: batchY[:,Num]

        # Every saveInter iterations, save the current model state and test its accuracy
        if i % saveInter == 0:  # if i is a multiple of saveInter
            [total_cross_entropy,pred,Wsum,lrr] = sess.run([cost,A1,parameters['W'],learning_rate],feed_dict={x:batchX,y_:batchY[:,Num]}) # evaluate the cost, predictions, weights, and current learning rate
            pred1 = predict(pred)  # threshold the predictions with predict

            # Log the current learning rate lr and the accuracy on the minibatch
            print('lr:{:f},train Set Accuracy: {:f}'.format(lrr,(np.mean(pred1 == batchY[:,Num]) * 100))) # print the training-set accuracy
            pf.write('lr:{:f},train Set Accuracy: {:f}\n'.format(lrr,(np.mean(pred1 == batchY[:,Num]) * 100))) # write the training-set accuracy

            # Log the iteration count and the cross entropy
            print("handwrite: %d, iterate times: %d , cross entropy:%g"%(Num,i,total_cross_entropy)) # print the iteration count and cross entropy
            pf.write("handwrite: %d, iterate times: %d , cross entropy:%g, W sum is: %g\n" %(Num,i,total_cross_entropy,np.sum(Wsum))) # write the iteration count, cross entropy, and sum of the weights

            # Evaluate the current parameters on the full training batch (testbatch)
            [testpred] = sess.run([A1],feed_dict={x: testbatchX, y_: testbatchY[:, Num]})  # run the forward pass on the full set
            testpred1 = predict(testpred)   # threshold the predictions with predict
            print('predict sum is: {:f},Testing Set Accuracy: {:f}\n'.format(np.sum(testpred1),(np.mean(testpred1 == testbatchY[:, Num]) * 100)))  # print the test-set accuracy
            pf.write('predict sum is: {:f},Testing Set Accuracy: {:f}\n'.format(np.sum(testpred1),(np.mean(testpred1 == testbatchY[:,Num]) * 100))) # write the test-set accuracy
            pf.write("\n") # write a blank line

            # Save the current model
            saveName = "model/my-model-" + str(Num) # checkpoint prefix "model/my-model-" + str(Num)
            saver.save(sess, saveName, global_step=i) # save a checkpoint with saver.save
            pf.write("save model completed\n") # log that the save completed
            
            # If the cross entropy becomes nan (an extreme value appeared), stop training and record the name of the latest saved model
            if total_cross_entropy != total_cross_entropy: # nan is the only value that is not equal to itself
                print("is nan, stop") # print "is nan, stop"
                pf.write("is nan, stop\n") # write "is nan, stop"
                modelLast = "model/my-model-" + str(Num)+str(i-saveInter) # name of the last usable checkpoint, "model/my-model-" + str(Num)+str(i-saveInter)
                break # stop training
    pf.close() # close the log file
    return modelLast # return modelLast
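# To reuse one of the saved checkpoints later, the same graph would have to be rebuilt and then restored,
# roughly as follows (a hedged sketch; the checkpoint name "model/my-model-0-400" is only illustrative):
#   saver = tf.train.Saver()
#   saver.restore(sess, "model/my-model-0-400")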
    
# Model testing: train and evaluate the ten one-vs-rest classifiers
def test_model(): # driver that trains one binary classifier per digit
    mnist = loadData() # call loadData to load the data
    classNum = 10 # number of classes (10 digits)
    modelNames = [] # empty list for the names of the saved models
    logName = "./logModelNames.txt" # log file logModelNames.txt
    pf = open(logName, "w") # open logModelNames.txt for writing

    # Train a binary classifier for each class against all others, saving the 10 classifier models
    for i in range(classNum): # loop over all classNum classes
        modelNames.append(model(mnist,i)) # append model(mnist,i), the model name returned after training the classifier for digit i
        pf.write(modelNames[i]) # write modelNames[i]
        pf.write("\n") # write a newline
    pf.close() # close the file

if __name__ == '__main__': # main entry point
    test_model() # call test_model
    

> Below is the error message: InvalidArgumentError (see above for traceback): Incompatible shapes: [30,784] vs. [784,1]

The error comes from how the multiplication is written when building this simple network with no hidden layer: X*W is an elementwise product, whereas the correct call is tf.matmul(X,W).
Changing "Z1=(X*W)+b" to "Z1=tf.matmul(X,W)+b" makes the code above run.

The training output is as follows:

lr:0.005000,train Set Accuracy: 16.666667
handwrite: 0, iterate times: 0 , cross entropy:4.98731
predict sum is: 55000.000000,Testing Set Accuracy: 9.898182

lr:0.005000,train Set Accuracy: 30.000000
handwrite: 0, iterate times: 100 , cross entropy:4.47434
predict sum is: 42448.000000,Testing Set Accuracy: 32.701818

lr:0.005000,train Set Accuracy: 73.333333
handwrite: 0, iterate times: 200 , cross entropy:3.06059
predict sum is: 29849.000000,Testing Set Accuracy: 55.314545

lr:0.005000,train Set Accuracy: 63.333333
handwrite: 0, iterate times: 300 , cross entropy:-0.327935
predict sum is: 32571.000000,Testing Set Accuracy: 48.732727

lr:0.005000,train Set Accuracy: 46.666667
handwrite: 0, iterate times: 400 , cross entropy:-54.8765
predict sum is: 27388.000000,Testing Set Accuracy: 55.378182

lr:0.005000,train Set Accuracy: 73.333333
handwrite: 0, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.101818

is nan, stop
lr:0.005000,train Set Accuracy: 13.333333
handwrite: 1, iterate times: 0 , cross entropy:4.49648
predict sum is: 55000.000000,Testing Set Accuracy: 11.234545

lr:0.005000,train Set Accuracy: 23.333333
handwrite: 1, iterate times: 100 , cross entropy:2.75025
predict sum is: 39640.000000,Testing Set Accuracy: 39.125455

lr:0.005000,train Set Accuracy: 60.000000
handwrite: 1, iterate times: 200 , cross entropy:2.9515
predict sum is: 27647.000000,Testing Set Accuracy: 60.712727

lr:0.005000,train Set Accuracy: 50.000000
handwrite: 1, iterate times: 300 , cross entropy:0.0803952
predict sum is: 32210.000000,Testing Set Accuracy: 52.241818

lr:0.005000,train Set Accuracy: 53.333333
handwrite: 1, iterate times: 400 , cross entropy:-81.9736
predict sum is: 30721.000000,Testing Set Accuracy: 54.490909

lr:0.005000,train Set Accuracy: 73.333333
handwrite: 1, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 88.765455

is nan, stop
lr:0.005000,train Set Accuracy: 10.000000
handwrite: 2, iterate times: 0 , cross entropy:3.94733
predict sum is: 55000.000000,Testing Set Accuracy: 9.945455

lr:0.005000,train Set Accuracy: 20.000000
handwrite: 2, iterate times: 100 , cross entropy:3.93083
predict sum is: 52604.000000,Testing Set Accuracy: 14.280000

lr:0.005000,train Set Accuracy: 66.666667
handwrite: 2, iterate times: 200 , cross entropy:2.31273
predict sum is: 25489.000000,Testing Set Accuracy: 62.140000

lr:0.005000,train Set Accuracy: 70.000000
handwrite: 2, iterate times: 300 , cross entropy:-1.51881
predict sum is: 22049.000000,Testing Set Accuracy: 66.881818

lr:0.005000,train Set Accuracy: 43.333333
handwrite: 2, iterate times: 400 , cross entropy:-59.7042
predict sum is: 29764.000000,Testing Set Accuracy: 52.989091

lr:0.005000,train Set Accuracy: 80.000000
handwrite: 2, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.054545

is nan, stop
lr:0.005000,train Set Accuracy: 13.333333
handwrite: 3, iterate times: 0 , cross entropy:4.51146
predict sum is: 55000.000000,Testing Set Accuracy: 10.250909

lr:0.005000,train Set Accuracy: 26.666667
handwrite: 3, iterate times: 100 , cross entropy:4.49274
predict sum is: 43234.000000,Testing Set Accuracy: 31.498182

lr:0.005000,train Set Accuracy: 53.333333
handwrite: 3, iterate times: 200 , cross entropy:3.11949
predict sum is: 33806.000000,Testing Set Accuracy: 47.530909

lr:0.005000,train Set Accuracy: 40.000000
handwrite: 3, iterate times: 300 , cross entropy:1.82847
predict sum is: 44129.000000,Testing Set Accuracy: 29.009091

lr:0.005000,train Set Accuracy: 60.000000
handwrite: 3, iterate times: 400 , cross entropy:-50.3269
predict sum is: 25410.000000,Testing Set Accuracy: 57.810909

lr:0.005000,train Set Accuracy: 76.666667
handwrite: 3, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 89.749091

is nan, stop
lr:0.005000,train Set Accuracy: 13.333333
handwrite: 4, iterate times: 0 , cross entropy:4.48332
predict sum is: 55000.000000,Testing Set Accuracy: 9.649091

lr:0.005000,train Set Accuracy: 96.666667
handwrite: 4, iterate times: 100 , cross entropy:4.01762
predict sum is: 3406.000000,Testing Set Accuracy: 93.980000

lr:0.005000,train Set Accuracy: 36.666667
handwrite: 4, iterate times: 200 , cross entropy:4.7305
predict sum is: 46868.000000,Testing Set Accuracy: 24.307273

lr:0.005000,train Set Accuracy: 36.666667
handwrite: 4, iterate times: 300 , cross entropy:0.819301
predict sum is: 32130.000000,Testing Set Accuracy: 50.052727

lr:0.005000,train Set Accuracy: 63.333333
handwrite: 4, iterate times: 400 , cross entropy:-63.146
predict sum is: 26222.000000,Testing Set Accuracy: 59.147273

lr:0.005000,train Set Accuracy: 76.666667
handwrite: 4, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.350909

is nan, stop
lr:0.005000,train Set Accuracy: 16.666667
handwrite: 5, iterate times: 0 , cross entropy:4.98377
predict sum is: 55000.000000,Testing Set Accuracy: 9.067273

lr:0.005000,train Set Accuracy: 56.666667
handwrite: 5, iterate times: 100 , cross entropy:2.67669
predict sum is: 24081.000000,Testing Set Accuracy: 62.716364

lr:0.005000,train Set Accuracy: 40.000000
handwrite: 5, iterate times: 200 , cross entropy:3.18606
predict sum is: 29414.000000,Testing Set Accuracy: 53.245455

lr:0.005000,train Set Accuracy: 56.666667
handwrite: 5, iterate times: 300 , cross entropy:2.15873
predict sum is: 30405.000000,Testing Set Accuracy: 51.327273

lr:0.005000,train Set Accuracy: 66.666667
handwrite: 5, iterate times: 400 , cross entropy:-39.4382
predict sum is: 28597.000000,Testing Set Accuracy: 53.967273

lr:0.005000,train Set Accuracy: 70.000000
handwrite: 5, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.932727

is nan, stop
lr:0.005000,train Set Accuracy: 96.666667
handwrite: 6, iterate times: 0 , cross entropy:2.54767
predict sum is: 0.000000,Testing Set Accuracy: 90.150909

lr:0.005000,train Set Accuracy: 26.666667
handwrite: 6, iterate times: 100 , cross entropy:5.00434
predict sum is: 49145.000000,Testing Set Accuracy: 20.490909

lr:0.005000,train Set Accuracy: 43.333333
handwrite: 6, iterate times: 200 , cross entropy:4.33006
predict sum is: 34103.000000,Testing Set Accuracy: 47.705455

lr:0.005000,train Set Accuracy: 70.000000
handwrite: 6, iterate times: 300 , cross entropy:1.52785
predict sum is: 21247.000000,Testing Set Accuracy: 69.029091

lr:0.005000,train Set Accuracy: 50.000000
handwrite: 6, iterate times: 400 , cross entropy:-38.3298
predict sum is: 29329.000000,Testing Set Accuracy: 54.025455

lr:0.005000,train Set Accuracy: 83.333333
handwrite: 6, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.150909

is nan, stop
lr:0.005000,train Set Accuracy: 100.000000
handwrite: 7, iterate times: 0 , cross entropy:1.65013
predict sum is: 0.000000,Testing Set Accuracy: 89.609091

lr:0.005000,train Set Accuracy: 80.000000
handwrite: 7, iterate times: 100 , cross entropy:3.27103
predict sum is: 18348.000000,Testing Set Accuracy: 76.045455

lr:0.005000,train Set Accuracy: 33.333333
handwrite: 7, iterate times: 200 , cross entropy:2.51653
predict sum is: 35404.000000,Testing Set Accuracy: 45.529091

lr:0.005000,train Set Accuracy: 43.333333
handwrite: 7, iterate times: 300 , cross entropy:0.587267
predict sum is: 39636.000000,Testing Set Accuracy: 37.401818

lr:0.005000,train Set Accuracy: 56.666667
handwrite: 7, iterate times: 400 , cross entropy:-73.8344
predict sum is: 27259.000000,Testing Set Accuracy: 57.520000

lr:0.005000,train Set Accuracy: 76.666667
handwrite: 7, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 89.609091

is nan, stop
lr:0.005000,train Set Accuracy: 13.333333
handwrite: 8, iterate times: 0 , cross entropy:4.49435
predict sum is: 55000.000000,Testing Set Accuracy: 9.798182

lr:0.005000,train Set Accuracy: 96.666667
handwrite: 8, iterate times: 100 , cross entropy:2.4589
predict sum is: 304.000000,Testing Set Accuracy: 89.921818

lr:0.005000,train Set Accuracy: 13.333333
handwrite: 8, iterate times: 200 , cross entropy:3.77693
predict sum is: 47801.000000,Testing Set Accuracy: 22.636364

lr:0.005000,train Set Accuracy: 63.333333
handwrite: 8, iterate times: 300 , cross entropy:0.305798
predict sum is: 21461.000000,Testing Set Accuracy: 66.250909

lr:0.005000,train Set Accuracy: 36.666667
handwrite: 8, iterate times: 400 , cross entropy:-45.1836
predict sum is: 27745.000000,Testing Set Accuracy: 55.883636

lr:0.005000,train Set Accuracy: 66.666667
handwrite: 8, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.201818

is nan, stop
lr:0.005000,train Set Accuracy: 100.000000
handwrite: 9, iterate times: 0 , cross entropy:1.70039
predict sum is: 0.000000,Testing Set Accuracy: 90.083636

lr:0.005000,train Set Accuracy: 16.666667
handwrite: 9, iterate times: 100 , cross entropy:4.93745
predict sum is: 53646.000000,Testing Set Accuracy: 12.334545

lr:0.005000,train Set Accuracy: 53.333333
handwrite: 9, iterate times: 200 , cross entropy:3.10768
predict sum is: 39225.000000,Testing Set Accuracy: 38.001818

lr:0.005000,train Set Accuracy: 60.000000
handwrite: 9, iterate times: 300 , cross entropy:1.00401
predict sum is: 29131.000000,Testing Set Accuracy: 54.281818

lr:0.005000,train Set Accuracy: 43.333333
handwrite: 9, iterate times: 400 , cross entropy:-69.1324
predict sum is: 27424.000000,Testing Set Accuracy: 56.832727

lr:0.005000,train Set Accuracy: 76.666667
handwrite: 9, iterate times: 500 , cross entropy:nan
predict sum is: 0.000000,Testing Set Accuracy: 90.083636

is nan, stop

References:

https://tensorflow.google.cn/api_docs/python/
