Andrew Ng's Course 2, Week 3 Programming Assignment

Goal of this assignment

Learn TensorFlow

Download

Link: https://pan.baidu.com/s/1imMIYrTCVoiF_H_sT_x5Vg
Extraction code: 5tja

Code

# Goals of this assignment:
# - initialize variables
# - start a session
# - train an algorithm
# - implement a neural network
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
import tf_utils
import time

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

np.random.seed(1)
#linear function: Y = WX + b on fixed random inputs
def linear_function():
    np.random.seed(1)

    X=np.random.randn(3,1)
    W=np.random.randn(4,3)
    b=np.random.randn(4,1)

    Y=tf.add(tf.matmul(W,X),b)

    sess=tf.Session()
    result=sess.run(Y)
    sess.close()
    return result
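# Quick check (not in the original post): with the fixed seeds, the printed
# 4x1 vector is deterministic.
print("result = " + str(linear_function()))
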
#sigmoid
def sigmoid(z):
    x=tf.placeholder(tf.float32,name='x')
    sig=tf.sigmoid(x)
    sess=tf.Session()
    result=sess.run(sig,feed_dict={x:z})
    sess.close()
    return result
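# Quick check: sigmoid(0) is exactly 0.5 and sigmoid(12) is close to 1.
print("sigmoid(0) = " + str(sigmoid(0)))
print("sigmoid(12) = " + str(sigmoid(12)))
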
#one-hot matrix
def one_hot_matrix(labels,C):
    """
    :param labels: label vector
    :param C: number of classes
    :return: one-hot matrix
    """
    C=tf.constant(C,name='C')
    one_hot_matrix=tf.one_hot(indices=labels,depth=C,axis=0)
    sess=tf.Session()
    one_hot=sess.run(one_hot_matrix)
    sess.close()
    return one_hot
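# Quick check: with axis=0 the result has shape (C, number of labels),
# one column per label (the label values below are a made-up test input).
labels = np.array([1, 2, 3, 0, 2, 1])
print("one_hot = " + str(one_hot_matrix(labels, C=4)))
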
#initialize to ones
def ones(shape):
    ones=tf.ones(shape)
    sess=tf.Session()
    ones=sess.run(ones)
    sess.close()
    return ones
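# Quick check: returns a NumPy array of ones with the requested shape.
print("ones([3]) = " + str(ones([3])))
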

#build the network
X_train_orig , Y_train_orig , X_test_orig , Y_test_orig , classes = tf_utils.load_dataset()#load the dataset
#flatten so that one column is one example
X_train_flatten=X_train_orig.reshape(X_train_orig.shape[0],-1).T
X_test_flatten=X_test_orig.reshape(X_test_orig.shape[0],-1).T
#normalize pixel values to [0,1]
X_train=X_train_flatten/255
X_test=X_test_flatten/255
#convert labels to one-hot matrices
Y_train=tf_utils.convert_to_one_hot(Y_train_orig,6)
Y_test=tf_utils.convert_to_one_hot(Y_test_orig,6)
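# Shape sanity check (the SIGNS dataset used here has 64x64x3 images,
# 1080 training and 120 test examples, so X_train should be (12288, 1080)):
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
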
#create placeholders
def create_placeholders(n_x,n_y):
    """
      参数:
          n_x - 一个实数,图片向量的大小(64*64*3 = 12288)
          n_y - 一个实数,分类数(从0到5,所以n_y = 6)
      返回:
          X - 一个数据输入的占位符,维度为[n_x, None],dtype = "float"
          Y - 一个对应输入的标签的占位符,维度为[n_Y,None],dtype = "float"
      """
    X=tf.placeholder(tf.float32,[n_x,None],name='X')
    Y=tf.placeholder(tf.float32,[n_y,None],name='Y')
    return X,Y
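# Quick check: in TF1 the unspecified batch dimension prints as '?'.
X, Y = create_placeholders(12288, 6)
print("X = " + str(X))
print("Y = " + str(Y))
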
#initialize parameters
def initialize_parameters():
    tf.set_random_seed(1)#np.random.seed does not affect TF initializers
    W1=tf.get_variable('W1',[25,12288],initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1=tf.get_variable('b1',[25,1],initializer=tf.zeros_initializer())
    W2=tf.get_variable('W2',[12, 25],initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable('b2', [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable('W3', [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable('b3', [6, 1], initializer=tf.zeros_initializer())
    parameters={
        'W1':W1,
        'W2':W2,
        'W3':W3,
        'b1':b1,
        'b2':b2,
        'b3':b3
    }
    return parameters
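# Quick check (reset the default graph first so the named variables
# can be created cleanly):
ops.reset_default_graph()
parameters = initialize_parameters()
print("W1 = " + str(parameters['W1']))
print("b1 = " + str(parameters['b1']))
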
#forward propagation
def forward_propagation(X,parameters):
    W1=parameters['W1']
    b1=parameters['b1']
    W2=parameters['W2']
    b2=parameters['b2']
    W3=parameters['W3']
    b3=parameters['b3']

    Z1=tf.add(tf.matmul(W1,X),b1)   # LINEAR
    A1=tf.nn.relu(Z1)               # RELU
    Z2=tf.add(tf.matmul(W2,A1),b2)  # LINEAR
    A2=tf.nn.relu(Z2)               # RELU
    Z3=tf.add(tf.matmul(W3,A2),b3)  # LINEAR (the softmax is folded into the cost)

    return Z3
#compute the cost
def compute_cost(Z3,Y):
    #softmax_cross_entropy_with_logits expects (batch, classes), so transpose
    logits=tf.transpose(Z3)
    labels=tf.transpose(Y)
    cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels))
    return cost
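# Quick end-to-end check of the graph (a minimal sketch mirroring the course
# notebook): Z3 is a (6, ?) tensor of logits and cost is a scalar.
ops.reset_default_graph()
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("Z3 = " + str(Z3))
print("cost = " + str(cost))
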
#build the model
def model(X_train,Y_train,X_test,Y_test,learning_rate=0.0001,num_epochs=1500,
          minibatch_size=32,print_cost=True,is_plot=True):
    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed=3
    (n_x,m)=X_train.shape
    n_y=Y_train.shape[0]
    costs=[]

    X,Y=create_placeholders(n_x,n_y)#create placeholders for X and Y
    parameters=initialize_parameters()#initialize parameters
    Z3=forward_propagation(X,parameters)#forward propagation
    cost=compute_cost(Z3,Y)#compute the cost
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)#backpropagation with the Adam optimizer
    init=tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost=0#cost for this epoch
            num_minibatches=int(m/minibatch_size)#number of minibatches
            seed=seed+1
            minibatches=tf_utils.random_mini_batches(X_train,Y_train,minibatch_size,seed)
            for minibatch in minibatches:
                (minibatch_X,minibatch_Y)=minibatch
                _,minibatch_cost=sess.run([optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y})
                epoch_cost=epoch_cost+minibatch_cost/num_minibatches
            if epoch%5==0:
                costs.append(epoch_cost)
                if print_cost and epoch%100==0:
                    print("epoch = " + str(epoch) + "    epoch_cost = " + str(epoch_cost))
        if is_plot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('epochs (per fives)')
            plt.title("Learning rate =" + str(learning_rate))
            plt.show()

        # save the learned parameters
        parameters = sess.run(parameters)
        print("Parameters have been saved to the session.")

        # compute the current predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
#start time
start_time = time.perf_counter()#time.clock() was removed in Python 3.8
#start training
parameters = model(X_train, Y_train, X_test, Y_test)
#end time
end_time = time.perf_counter()
#print the elapsed time
print("Elapsed time = " + str(end_time - start_time) + " s")

Reference: https://blog.csdn.net/u013733326/article/details/79971488
