Andrew Ng's Deep Learning Course 1, Week 3 Programming Assignment

Link: https://pan.baidu.com/s/1ypHuubawEcuJyAxyRMErYg
Extraction code: a9av

This assignment builds a binary (2-class) classification neural network with a single hidden layer.

Source code:

import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
np.random.seed(1) # set a fixed random seed so the random numbers below are reproducible
X,Y=load_planar_dataset() # load the planar dataset
plt.scatter(X[0,:],X[1,:],c=np.squeeze(Y),s=40, cmap=plt.cm.Spectral) # plot the data points
# plt.show() # uncomment to display the scatter plot
shape_X=X.shape
shape_Y=Y.shape
m=Y.shape[1] # number of training examples
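
# For this assignment's dataset, X has shape (2, 400) and Y has shape (1, 400),
# i.e. m = 400 training examples with 2 features each.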

# The general methodology for building a neural network is:
# 1. Define the network structure (number of input units, number of hidden units, etc.).
# 2. Initialize the model's parameters.
# 3. Loop:
#           implement forward propagation
#           compute the loss
#           implement backward propagation
#           update the parameters (gradient descent)
# (the model equations are summarized below)
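
# The model implemented here is a single-hidden-layer network:
#   Z1 = W1 X + b1         A1 = tanh(Z1)
#   Z2 = W2 A1 + b2        A2 = sigmoid(Z2) = yhat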
def layer_sizes(X,Y):
    """
    :param X: input data
    :param Y: labels
    :return: sizes of the input layer, hidden layer, and output layer
    """
    n_x=X.shape[0] # input layer size (number of features)
    n_h=4          # hidden layer size, hard-coded to 4
    n_y=Y.shape[0] # output layer size
    return n_x,n_h,n_y
def initialize_parameters(n_x,n_h,n_y):
    np.random.seed(2)
    W1=np.random.randn(n_h,n_x)*0.01 # small random values to break symmetry
    b1=np.zeros(shape=(n_h,1))       # biases can start at zero
    W2=np.random.randn(n_y,n_h)*0.01
    b2=np.zeros(shape=(n_y,1))

    assert (W1.shape==(n_h,n_x))
    assert (b1.shape==(n_h,1))
    assert (W2.shape==(n_y,n_h))
    assert (b2.shape==(n_y,1))

    parameters={
        'W1':W1,
        'W2':W2,
        'b1':b1,
        'b2':b2
    }
    return parameters
def forward_propagation(X,parameters):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']

    Z1=np.dot(W1,X)+b1
    A1=np.tanh(Z1)
    Z2=np.dot(W2,A1)+b2
    A2=sigmoid(Z2)

    assert (A2.shape==(1,X.shape[1]))


    cache={
        'Z1':Z1,
        'Z2':Z2,
        'A1':A1,
        'A2':A2
    }
    return A2,cache
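# Cross-entropy cost computed by compute_cost below:
#   J = -(1/m) * sum( Y*log(A2) + (1-Y)*log(1-A2) )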
def compute_cost(A2,Y):
    m=Y.shape[1]
    logprobs=Y*np.log(A2)+(1-Y)*np.log(1-A2)
    cost=-np.sum(logprobs)/m
    cost=float(np.squeeze(cost))
    assert (isinstance(cost,float))
    return cost
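# Backpropagation formulas used below; for the sigmoid output with
# cross-entropy, dZ2 = A2 - Y, and for the tanh hidden layer the
# derivative is tanh'(Z1) = 1 - A1**2:
#   dW2 = (1/m) dZ2 . A1^T         db2 = (1/m) row-sum(dZ2)
#   dZ1 = (W2^T . dZ2) * (1 - A1^2)
#   dW1 = (1/m) dZ1 . X^T          db1 = (1/m) row-sum(dZ1)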
def backward_propagation(parameters,cache,X,Y):
    A1=cache['A1']
    A2=cache['A2']
    m=Y.shape[1]
    W2=parameters['W2']

    dZ2=A2-Y
    dW2=np.dot(dZ2,A1.T)/m
    db2=np.sum(dZ2,axis=1,keepdims=True)/m
    dZ1=np.multiply(np.dot(W2.T,dZ2),1 - np.power(A1, 2))
    dW1=np.dot(dZ1,X.T)/m
    db1=np.sum(dZ1,axis=1,keepdims=True)/m

    grads={
        'dW1':dW1,
        'dW2':dW2,
        'db1':db1,
        'db2':db2
    }
    return grads
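# Gradient descent step: each parameter is moved against its gradient,
#   theta = theta - learning_rate * d_theta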
def update_parameters(parameters,grads,learning_rate=1.2):
    W1=parameters['W1']
    W2=parameters['W2']
    b1=parameters['b1']
    b2=parameters['b2']
    dW1=grads['dW1']
    dW2=grads['dW2']
    db1=grads['db1']
    db2=grads['db2']

    W1=W1-learning_rate*dW1
    b1=b1-learning_rate*db1
    W2=W2-learning_rate*dW2
    b2=b2-learning_rate*db2

    parameters={
        'W1':W1,
        'W2':W2,
        'b1':b1,
        'b2':b2
    }
    return parameters
def nn_model(X,Y,n_h,num_iterations,print_cost=False):
    np.random.seed(3)
    n_x,_,n_y=layer_sizes(X,Y) # n_h comes from the function argument
    parameters=initialize_parameters(n_x,n_h,n_y)
    for i in range(num_iterations):
        A2,cache=forward_propagation(X,parameters)
        cost=compute_cost(A2,Y)
        grads=backward_propagation(parameters,cache,X,Y)
        parameters=update_parameters(parameters,grads,learning_rate=0.5)
        if print_cost and i%1000==0:
            print("Cost after iteration %i: %f" % (i,cost))
    return parameters
def predict(parameters,X):
    A2 , cache = forward_propagation(X,parameters)
    predictions = np.round(A2) # threshold at 0.5: A2 > 0.5 is class 1, otherwise class 0
    return predictions


parameters = nn_model(X, Y, n_h = 4, num_iterations=10000, print_cost=True)
# plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
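
Note that sklearn.linear_model is imported at the top but never used. In the original assignment it provides a plain logistic regression baseline, which performs poorly on this non-linearly-separable dataset (the assignment reports about 47% accuracy, versus roughly 90% for the network above). A minimal sketch of that comparison, assuming the same X, Y, and plot_decision_boundary are already in scope:

clf = sklearn.linear_model.LogisticRegressionCV() # cross-validated logistic regression
clf.fit(X.T, np.squeeze(Y))                       # sklearn expects samples as rows, labels as a 1-D array
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
LR_predictions = clf.predict(X.T)
print('Logistic regression accuracy: %d%%' % float(
    (np.dot(Y, LR_predictions) + np.dot(1 - Y, 1 - LR_predictions)) / float(Y.size) * 100))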

Reference: https://blog.csdn.net/u013733326/article/details/79827273
