Python artificial neural network model

Environment: Windows 10 64-bit, Python 3.6, PyCharm 2018.1.1
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_iris

# Primal form of the perceptron learning algorithm
# Generator for a linearly separable data set
def create_data(n):
    np.random.seed(1)
    x_11 = np.random.randint(0,100,(n,1))
    x_12 = np.random.randint(0,100,(n,1))
    x_13 = 20 + np.random.randint(0,10,(n,1))
    x_21 = np.random.randint(0,100,(n,1))
    x_22 = np.random.randint(0,100,(n,1))
    x_23 = 10 - np.random.randint(0,10,(n,1))

    new_x_12 = x_12*np.sqrt(2)/2 - x_13*np.sqrt(2)/2  # rotate 45 degrees about the x-axis
    new_x_13 = x_12*np.sqrt(2)/2 + x_13*np.sqrt(2)/2  # rotate 45 degrees about the x-axis
    new_x_22 = x_22*np.sqrt(2)/2 - x_23*np.sqrt(2)/2  # rotate 45 degrees about the x-axis
    new_x_23 = x_22*np.sqrt(2)/2 + x_23*np.sqrt(2)/2  # rotate 45 degrees about the x-axis

    plus_samples = np.hstack([x_11,new_x_12,new_x_13,np.ones((n,1))])
    minus_samples = np.hstack([x_21,new_x_22,new_x_23,-np.ones((n,1))])
    samples = np.vstack([plus_samples, minus_samples])
    np.random.shuffle(samples)
    return samples
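The four new_x_* assignments above apply a 45-degree rotation to the (x_12, x_13) and (x_22, x_23) pairs, which turns the +20 and -10 offsets into a margin along a tilted plane, so the two classes stay linearly separable in 3D. As a small check of my own (not part of the original post), the same transform can be written with an explicit 2x2 rotation matrix:

# Sketch: the component-wise formulas above equal a rotation by theta = pi/4.
theta = np.pi / 4
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
point = np.array([5.0, 25.0])   # an example (x_12, x_13) pair
rotated = R @ point             # same result as the new_x_12 / new_x_13 formulas
print(rotated)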
# Function to plot the data set
def plot_samples(ax,samples):
    Y = samples[:, -1]
    position_p = Y == 1   # positions of the positive class
    position_m = Y == -1  # positions of the negative class
    ax.scatter(samples[position_p, 0],samples[position_p,1],samples[position_p,2],marker='+',label='+',color='b')
    ax.scatter(samples[position_m, 0],samples[position_m,1],samples[position_m,2],marker='^',label='-',color='y')

fig = plt.figure()
ax = Axes3D(fig)
data = create_data(100)
plot_samples(ax,data)
ax.legend(loc='best')
plt.show()
# Primal form of the perceptron learning algorithm
def perceptron(train_data, eta, w_0, b_0):
    x = train_data[:, :-1]
    y = train_data[:, -1]
    length = train_data.shape[0]
    w = w_0
    b = b_0
    step_num = 0
    while True:
        i = 0
        while i < length:  # one pass over every sample in the training set
            step_num += 1
            x_i = x[i].reshape((x.shape[1], 1))
            y_i = y[i]
            if y_i*(np.dot(np.transpose(w), x_i) + b) <= 0:  # misclassified sample
                w = w + eta * y_i * x_i  # gradient descent update on w
                b = b + eta * y_i        # gradient descent update on b
                break                    # restart the pass from the first sample
            else:
                i = i + 1
        if i == length:  # no misclassified samples remain
            break
    return (w, b, step_num)
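When the outer loop exits, no training sample should be misclassified. A quick vectorized check (a minimal sketch of my own, not in the original code) verifies that every sample satisfies y_i * (w^T x_i + b) > 0 for the returned w and b:

# Sketch: count samples violating y_i * (w^T x_i + b) > 0; expect 0 after training.
def count_misclassified(train_data, w, b):
    x = train_data[:, :-1]
    y = train_data[:, -1]
    margins = y * (x @ w.ravel() + b)   # one margin per sample
    return int(np.sum(margins <= 0))

For example, count_misclassified(data, w, b) should return 0 once perceptron has converged.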
# Separating hyperplane: solve w[0]*x + w[1]*y + w[2]*z + b = 0 for z
def create_hyperplane(x,y,w,b):
    return (-w[0][0]*x - w[1][0]*y - b)/w[2][0]
data = create_data(100)
eta, w_0, b_0 = 0.1, np.ones((3,1), dtype=float), 1
w, b, num = perceptron(data, eta, w_0, b_0)

fig = plt.figure()
plt.suptitle('perceptron')
ax = Axes3D(fig)

plot_samples(ax, data)

x = np.linspace(-30,100,100)
y = np.linspace(-30,100,100)
x,y = np.meshgrid(x, y)
z = create_hyperplane(x,y,w,b)
ax.plot_surface(x,y,z,rstride=1,cstride=1,color='g',alpha=0.2)
ax.legend(loc='best')
plt.show()
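The imports at the top also bring in MLPClassifier and load_iris, which the script above never uses. As a minimal sketch of the artificial neural network model named in the title (the hyperparameters are my own illustrative choices, not taken from the original post), the two could be combined like this:

# Sketch (assumed usage, not part of the original script): fit a small MLP on iris.
iris = load_iris()
mlp = MLPClassifier(hidden_layer_sizes=(10,), max_iter=1000, random_state=1)
mlp.fit(iris.data, iris.target)
print('training accuracy:', mlp.score(iris.data, iris.target))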

Reposted from blog.csdn.net/dingming001/article/details/80957947