Deep Learning Series, Assignment 1 -- by Andrew Ng

import numpy as np
import matplotlib.pyplot as plt
import h5py  # common package for interacting with datasets stored in H5 files
from PIL import Image  # Pillow is used below to load and resize a custom test image
from lr_utils import load_dataset

"""使用logistic regression进行猫的图片分类"""
# 注意:向量一般字母大写;标量一般字母小写

################ Data loading and preprocessing ###############

train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
index = 1
# plt.imshow(train_set_x_orig[index])
print("y=" + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode('utf-8') + "' picture.")
print('train_set_x_orig shape:', train_set_x_orig.shape)  # 209 training images
print('train_set_y shape:', train_set_y.shape)
print('test_set_x_orig shape:', test_set_x_orig.shape)  # 50 test images
print('test_set_y shape:', test_set_y.shape)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]  # each image is 64 x 64 pixels high and wide
print("Number of training samples: {}".format(m_train))
print("Number of test samples: {}".format(m_test))
print("Width/height of each color image: ({}, {})".format(num_px, num_px))
# Reshape each image's (64, 64, 3) pixel array into a feature column vector x of shape (64*64*3, 1)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
print('train_set_x_flatten shape (flattened train_set_x_orig):', train_set_x_flatten.shape)
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print('test_set_x_flatten shape (flattened test_set_x_orig):', test_set_x_flatten.shape)
print('First 5 pixel values of the first sample:', train_set_x_flatten[0:5, 0])
# Data preprocessing (normalization): a common approach is to center and standardize the dataset,
# i.e. subtract the mean of the whole numpy array from each example and divide by its standard deviation.
# For image datasets it is simpler, and almost as effective, to just divide every row by 255 (the maximum value of a pixel channel).
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
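
# The comment above also mentions the more general standardization (subtract the mean, divide
# by the standard deviation). It is not needed for this image data, but a minimal sketch of
# that approach, with per-feature statistics over the m samples, could look like this (the
# helper name is illustrative and it is not used further below):
def standardize(X):
    mu = np.mean(X, axis=1, keepdims=True)      # per-feature mean, shape (n_features, 1)
    sigma = np.std(X, axis=1, keepdims=True)    # per-feature standard deviation
    return (X - mu) / (sigma + 1e-8)            # small epsilon guards against zero variance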

################# Building the neural network #####################


def advanced_sigmoid(z):
    result = 1 / (1 + np.exp(-z))
    return result
# z = np.array([0, 2])
# print(advanced_sigmoid(z))
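
# Optional aside (not part of the original assignment): for very negative z, np.exp(-z) can
# overflow and emit RuntimeWarnings. A numerically stable variant keeps the exponent
# non-positive; a minimal sketch, not used below:
def stable_sigmoid(z):
    e = np.exp(-np.abs(z))                      # exponent is always <= 0, so no overflow
    return np.where(z >= 0, 1 / (1 + e), e / (1 + e))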


# Parameter initialization: initialize w as a zero vector and b as 0
def initialize_parameters(dim):
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))  # assert does nothing if the condition is true; if it is false, it raises an exception
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b

dim = 2
w, b = initialize_parameters(dim)
# print('w = \n', w)
# print('b = ', b)
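
# Side note (not needed here): zero initialization works for logistic regression because the
# model has a single output unit. For networks with hidden layers, weights are usually drawn
# from small random values to break symmetry; a minimal sketch of that variant:
def initialize_parameters_random(dim, scale=0.01):
    w = np.random.randn(dim, 1) * scale         # small random weights break symmetry
    b = 0
    return w, b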


# Forward propagation computes the cost; backward propagation computes the gradients
def propagate(w, b, X, Y):
    m = X.shape[1]
    # Forward propagation: A = sigmoid(w^T X + b), cost = -(1/m) * sum(Y*log(A) + (1-Y)*log(1-A))
    A = advanced_sigmoid(np.dot(w.T, X) + b)
    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    # Backward propagation: dw = (1/m) * X (A - Y)^T, db = (1/m) * sum(A - Y)
    dw = 1 / m * np.dot(X, (A - Y).T)
    db = 1 / m * np.sum(A - Y)

    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)  # remove dimensions of size 1 so cost is a scalar
    assert (cost.shape == ())

    grads = {"dw": dw, "db": db}
    return grads, cost
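
# Optional sanity check (a sketch that is not part of the original post): compare the analytic
# gradients returned by propagate() against centered finite differences of the cost.
def numerical_grad_check(w, b, X, Y, eps=1e-7):
    w = np.asarray(w, dtype=float)
    grads, _ = propagate(w, b, X, Y)
    dw_approx = np.zeros((w.shape[0], 1))
    for j in range(w.shape[0]):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[j, 0] += eps
        w_minus[j, 0] -= eps
        dw_approx[j, 0] = (propagate(w_plus, b, X, Y)[1] - propagate(w_minus, b, X, Y)[1]) / (2 * eps)
    db_approx = (propagate(w, b + eps, X, Y)[1] - propagate(w, b - eps, X, Y)[1]) / (2 * eps)
    # Both differences should be tiny (roughly the order of eps) if the gradients are correct.
    return np.max(np.abs(grads["dw"] - dw_approx)), abs(grads["db"] - db_approx)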

w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
grads, cost = propagate(w, b, X, Y)
print("dw = \n", grads['dw'])
print("db = ", grads['db'])
print("cost = ", cost)


# Optimize the parameters w and b with gradient descent
def optimize(w, b, X, Y, iterations, learning_rate, print_cost=False):
    costs = []
    for i in range(iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # update the parameters
        w = w - learning_rate * dw
        b = b - learning_rate * db

        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %d: %f" % (i, cost))

    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs

params, grads, costs = optimize(w, b, X, Y, iterations=100, learning_rate=0.009, print_cost=False)
print("w = \n", params["w"])
print("b = ", params["b"])
print("dw = \n", grads["dw"])
print("db = ", grads["db"])
print("costs = ", costs)


#  Prediction
def predict(w, b, X):
    m = X.shape[1]
    y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    A = advanced_sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        if A[0, i] <= 0.5:
            y_prediction[0, i] = 0
        else:
            y_prediction[0, i] = 1
    assert (y_prediction.shape == (1, m))
    return y_prediction
print("y_prediction:", predict(w, b, X))


################ Combine the functions above into a complete model #################
def model(X_train, Y_train, X_test, Y_test, iterations=1000, learning_rate=0.5, print_cost=False):
    w, b = initialize_parameters(X_train.shape[0])
    params, grads, costs = optimize(w, b, X_train, Y_train, iterations, learning_rate, print_cost)
    w = params["w"]
    b = params["b"]
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    print("训练集上的准确率:{}%".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("测试集上的准确率:{}%".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "iterations": iterations}
    return d


d = model(train_set_x, train_set_y, test_set_x, test_set_y, iterations=1000, learning_rate=0.005, print_cost=True)
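
# Optional follow-up (not in the original post): display one test image together with its
# label and the model's prediction for it, mirroring the training-set example shown above.
idx = 5  # any test index in [0, m_test)
plt.imshow(test_set_x[:, idx].reshape((num_px, num_px, 3)))
plt.show()
print("y = " + str(test_set_y[0, idx]) + ", the model predicts a '"
      + classes[int(d["Y_prediction_test"][0, idx])].decode("utf-8") + "' picture.")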

# Plot the learning curve
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)', fontsize=18)
plt.title("Learning rate ={}".format(d["learning_rate"]))
plt.show()


# Try several learning rates to see how the choice of learning rate affects gradient descent
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print("当前的学习率是:{}".format(i))
    models[i] = model(train_set_x, train_set_y, test_set_x, test_set_y, iterations=1500, learning_rate=i, print_cost=False)
    print('------------------- separator -----------------------')

for i in learning_rates:
    plt.plot(np.squeeze(models[i]["costs"]), label=str(models[i]["learning_rate"]))

plt.ylabel('cost')
plt.xlabel('iterations', fontsize=18)

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.title("不同学习率对cost曲线的影响", fontproperties='FangSong', fontsize=18)
plt.show()


# Test the model with your own image
my_img = 'la_defense.jpg'
fname = "images/" + my_img
# scipy.ndimage.imread and scipy.misc.imresize were removed from recent SciPy releases,
# so the image is loaded and resized with Pillow instead
img = Image.open(fname).convert("RGB")
image = np.array(img)
# Resize to (num_px, num_px), flatten into a (num_px*num_px*3, 1) column vector and rescale to
# [0, 1] so the input matches the preprocessing applied to the training data
my_image = np.array(img.resize((num_px, num_px))).reshape((1, num_px * num_px * 3)).T / 255
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
plt.show()
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image))].decode("utf-8") + "\" picture.")
 

Reprinted from blog.csdn.net/cdlwhm1217096231/article/details/82632213