import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v2 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
# IPython-specific: render figures inline in the notebook
# plt.rcParams sets default figure parameters
plt.rcParams['figure.figsize'] = (5.0, 4.0)  # default figure size
plt.rcParams['image.interpolation'] = 'nearest'  # interpolation style
plt.rcParams['image.cmap'] = 'gray'  # colormap
%load_ext autoreload
# reload extensions and modules before executing user code
%autoreload 2
'''
autoreload modes:
0: do not reload automatically.
1: reload only the modules imported with %aimport.
2: reload all modules except those excluded via %aimport.
'''
np.random.seed(1)
Import the packages, set the default plotting style, and fix the initial random seed.
def initialize_parameters(n_x, n_h, n_y):
    np.random.seed(1)  # without this seed the expected test output cannot be reproduced
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))
    assert(W1.shape == (n_h, n_x))
    assert(b1.shape == (n_h, 1))
    assert(W2.shape == (n_y, n_h))
    assert(b2.shape == (n_y, 1))
    parameters = {'W1': W1,
                  'b1': b1,
                  'W2': W2,
                  'b2': b2}
    return parameters
Initializes the parameters of a two-layer network: given the number of input features n_x, hidden units n_h, and output units n_y, it returns randomly initialized W1, W2 (scaled by 0.01 so the initial activations stay small and non-saturated) and zero-initialized b1, b2.
parameters = initialize_parameters(2,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
W1 = [[ 0.01624345 -0.00611756]
[-0.00528172 -0.01072969]]
b1 = [[0.]
[0.]]
W2 = [[ 0.00865408 -0.02301539]]
b2 = [[0.]]
Test: the printed values match the expected output.
def initialize_parameters_deep(layer_dims):
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
        assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
        assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
    return parameters
Initializes the parameters of a deep network. layer_dims is the array of layer sizes [n0, n1, n2, ..., nm];
for each layer l the function initializes W and b and stores them in the dictionary parameters.
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]
[-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]
[-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]
[-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]
[-0.01023785 -0.00712993 0.00625245 -0.00160513]
[-0.00768836 -0.00230031 0.00745056 0.01976111]]
b2 = [[0.]
[0.]
[0.]]
Test passed.
def linear_forward(A, W, b):
    Z = np.dot(W, A) + b
    assert(Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)
    return Z, cache
Linear step of forward propagation: computes Z and caches (A, W, b); this cache is what later code calls linear_cache.
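To make the shapes concrete, here is a minimal standalone check; the layer sizes 3 and 2 and the batch size 4 are made up purely for illustration:
import numpy as np
W = np.random.randn(3, 2)   # shape (n_l, n_lminus1)
A = np.random.randn(2, 4)   # shape (n_lminus1, m): 4 examples stacked as columns
b = np.zeros((3, 1))        # broadcast across the m columns
Z = np.dot(W, A) + b
print(Z.shape)              # (3, 4), i.e. (n_l, m)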
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
Z = [[ 3.26295337 -1.23429987]]
Test passed.
def linear_activation_forward(A_prev, W, b, activation):
    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    assert(A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
Forward propagation through one activated layer. The inputs are A_prev, W, b and activation, which selects the activation function. The linear step first produces Z and linear_cache = (A_prev, W, b); the chosen activation then turns Z into the output A and returns activation_cache, which holds Z. Both caches are packed together, so the cache returned for this layer effectively contains (A_prev, W, b, Z).
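sigmoid and relu are imported from dnn_utils_v2, which is not shown here; judging from how they are used, they are assumed to return the activation together with a cache holding Z, roughly like the following sketch (the _sketch names are only for illustration):
import numpy as np
def sigmoid_sketch(Z):
    A = 1 / (1 + np.exp(-Z))
    return A, Z   # the cache is just Z, kept for the backward pass
def relu_sketch(Z):
    A = np.maximum(0, Z)
    return A, Z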
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation='sigmoid')
print(str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W ,b, activation='relu')
print(str(A))
[[0.96890023 0.11013289]]
[[3.43896131 0. ]]
Test passed.
def L_model_forward(X, parameters):
    caches = []
    A = X
    L = len(parameters) // 2  # number of layers (each layer contributes a W and a b)
    for l in range(1, L):  # ReLU activations for layers 1 to L-1
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], 'relu')
        caches.append(cache)  # cache (A_prev, W, b) and Z for this layer
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], 'sigmoid')
    caches.append(cache)
    assert(AL.shape == (1, X.shape[1]))
    return AL, caches
Builds the forward-propagation model that outputs AL.
The inputs are X and the initialized parameters.
caches collects the per-layer cache objects in order, so they can later be read back by index.
Inside the function, the input passes through layers 1 to L-1 with ReLU activation and finally through layer L with sigmoid activation to produce the prediction AL; the per-layer caches are gathered into caches and returned.
Note how the layer numbers map onto list indices: caches[l-1] holds the cache of layer l, as the trace below illustrates.
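As a concrete trace of that indexing, take the two-layer test case below, where L = len(parameters) // 2 == 2: the ReLU loop runs only for l = 1 and appends layer 1's cache at caches[0]; the final sigmoid step for layer L = 2 appends its cache at caches[1]; hence "Length of caches list" prints 2, and in general caches[l-1] belongs to layer l.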
X, parameters = L_model_forward_test_case()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
AL = [[0.17007265 0.2524272 ]]
Length of caches list = 2
Test passed.
def compute_cost(AL, Y):
    m = Y.shape[1]
    # the sum over the m examples is fully vectorized; no per-example loop is needed
    cost = -1.0 / m * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))
    cost = np.squeeze(cost)  # turn e.g. [[17]] into 17
    assert(cost.shape == ())
    return cost
Computes the cross-entropy cost.
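For reference, the quantity computed above is the standard cross-entropy cost
J = -(1/m) * sum over i of [ y_i * log(a_i) + (1 - y_i) * log(1 - a_i) ],
where a_i is the i-th entry of AL and the sum runs over the m examples.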
Y, AL = compute_cost_test_case()
print(str(compute_cost(AL,Y)))
0.41493159961539694
Test passed.
# linear part of the backward pass for a single layer
def linear_backward(dZ, cache):
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = 1 / m * np.dot(dZ, A_prev.T)
    db = 1 / m * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)
    assert(dA_prev.shape == A_prev.shape)
    assert(dW.shape == W.shape)
    assert(db.shape == b.shape)
    return dA_prev, dW, db
Given dZ for some layer (knowing dZ means that layer's activation has already been handled),
the standard formulas compute dW and db for that layer, plus dA_prev, i.e. dA[l-1], which feeds the next backward step; the formulas are spelled out right after this note.
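For reference, these are exactly the formulas the code above implements for the linear step:
dW[l]   = (1/m) * dZ[l] . A[l-1].T
db[l]   = (1/m) * sum of dZ[l] over the examples (axis=1, keepdims=True)
dA[l-1] = W[l].T . dZ[l]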
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
dA_prev = [[ 0.51822968 -0.19517421]
[-0.40506361 0.15255393]
[ 2.37496825 -0.89445391]]
dW = [[-0.10076895 1.40685096 1.64992505]]
db = [[0.50629448]]
Test passed.
def linear_activation_backward(dA, cache, activation):
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
Backward propagation through a single layer including its activation.
The input dA may be the dA_prev produced by the previous backward step, or the derivative of the cost with respect to the final activation.
cache is the current layer's cache and activation selects the activation function.
The layer's linear_cache (A, W, b) and activation_cache (Z) are unpacked from cache;
relu_backward or sigmoid_backward turns dA into dZ;
finally linear_backward uses dZ and linear_cache (A, W, b) to compute dA_prev, dW and db, which are returned.
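relu_backward and sigmoid_backward also come from dnn_utils_v2 and are not shown here; judging from the call sites they are assumed to compute dZ = dA * g'(Z) from dA and the cached Z, roughly as in this sketch (the _sketch names are only for illustration):
import numpy as np
def relu_backward_sketch(dA, Z):
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0            # ReLU'(Z) is 0 where Z <= 0 and 1 elsewhere
    return dZ
def sigmoid_backward_sketch(dA, Z):
    s = 1 / (1 + np.exp(-Z))
    return dA * s * (1 - s)   # sigmoid'(Z) = s * (1 - s)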
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
sigmoid:
dA_prev = [[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]]
dW = [[ 0.10266786 0.09778551 -0.01968084]]
db = [[-0.05729622]]
relu:
dA_prev = [[ 0.44090989 -0. ]
[ 0.37883606 -0. ]
[-0.2298228 0. ]]
dW = [[ 0.44513824 0.37371418 -0.10478989]]
db = [[-0.20837892]]
Test passed.
def L_model_backward(AL, Y, caches):
    grads = {}
    L = len(caches)  # number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))  # derivative of the cost w.r.t. AL
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, caches[L-1], 'sigmoid')
    for l in reversed(range(L - 1)):  # layers L-1 down to 1 (l runs from L-2 down to 0)
        grads["dA" + str(l + 1)], grads["dW" + str(l + 1)], grads["db" + str(l + 1)] = linear_activation_backward(grads["dA" + str(l + 2)], caches[l], "relu")
    return grads
Builds the backward-propagation model.
grads is a dictionary holding the dA, dW and db obtained along the way.
First the cost is differentiated to get dAL; from dAL and caches[L-1] (the L-th cache) the sigmoid step produces the gradients stored under 'dA' + str(L), 'dW' + str(L) and 'db' + str(L).
The naming is confusing at first: dAL is dA[L], but what linear_activation_backward returns is dA_prev = dA[L-1], so the value stored under the key 'dA' + str(L) is really dA of layer L-1; the 'dA' keys are shifted by one layer.
Likewise, grads['dA' + str(l+1)], grads['dW' + str(l+1)], grads['db' + str(l+1)] mix a shifted dA key with the correct dW/db keys of layer l+1.
In practice this does not matter: as long as the ordering stays consistent, the dA entries are only used internally to feed the next backward step, and gradient descent only reads dW and db.
The for loop walks backwards over the layers via reversed(range(L-1)); a concrete trace follows below.
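To trace it on the test case below (a network with L = len(caches) = 2 layers): the sigmoid step uses caches[1] (layer 2) and stores dW2 and db2 for layer 2, plus the returned dA_prev (which is dA of layer 1) under the key 'dA2'; the loop then runs once with l = 0, reads grads['dA2'], uses caches[0] (layer 1), and stores dW1 and db1 for layer 1 plus dA of the input (dA[0]) under 'dA1'; update_parameters afterwards only touches dW1, db1, dW2, db2, so the shifted dA keys never affect the learned parameters.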
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dA1 = "+ str(grads["dA1"]))
dW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]
[0. 0. 0. 0. ]
[0.05283652 0.01005865 0.01777766 0.0135308 ]]
db1 = [[-0.22007063]
[ 0. ]
[-0.02835349]]
dA1 = [[ 0. 0.52257901]
[ 0. -0.3269206 ]
[ 0. -0.32070404]
[ 0. -0.74079187]]
Test passed.
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2
    for l in range(L):
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]
    return parameters
Gradient-descent parameter update; the rule being applied is spelled out below.
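For every layer l the update is the standard gradient-descent rule
W[l] := W[l] - learning_rate * dW[l]
b[l] := b[l] - learning_rate * db[l]
where learning_rate controls the step size (0.1 in the test below).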
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]]
b1 = [[-0.04659241]
[-1.28888275]
[ 0.53405496]]
W2 = [[-0.55569196 0.0354055 1.32964895]]
b2 = [[-0.84610769]]
Test passed.