Networks with a Linear Activation Function

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

For a Linearly Separable Problem

A single neuron with a linear activation is trained with the delta (Widrow-Hoff) rule: on each pass, the weights move by the learning rate times the error (Y - XW) projected back through the inputs. The bias is absorbed as a constant 1 in the first column of X, and the labels are ±1.

#input: one sample per row; the leading 1 is the bias term
X = np.array([[1,0,0],
              [1,0,1],
              [1,1,0],
              [1,1,1]])
#labels
Y = np.array([-1,1,1,1])
#weight vector, randomly initialized
W = np.random.random(3)
#learning rate
lr = 0.01

#single neuron trained with the delta rule
def update():
    global X,Y,W,lr
    A = np.dot(X,W)           #linear activation: A = XW
    W += lr*np.dot(Y-A,X)     #batch gradient step on the squared error

#train for 1000 epochs
for _ in range(1000):
    update()

#positive samples
x1 = [0,1,1]
y1 = [1,0,1]
#negative sample
x2 = [0]
y2 = [0]

#decision boundary: W0 + W1*x + W2*y = 0  =>  y = -(W1/W2)*x - W0/W2
xdata = np.linspace(-0.5,1.5)
slope = -W[1]/W[2]
intercept = -W[0]/W[2]

plt.figure()
plt.plot(xdata, slope*xdata+intercept, 'k-')
plt.plot(x1,y1,'yo')
plt.plot(x2,y2,'go')
plt.show()


(Figure: the learned line separating the positive samples from the negative sample.)
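
As a quick sanity check (a sketch, not in the original post), thresholding the linear output at zero should reproduce the labels. The delta rule converges toward the least-squares solution, which for this data is approximately [-0.5, 1, 1]:

#sanity check: threshold the linear output at zero
print(np.sign(np.dot(X,W)))          #expected: [-1.  1.  1.  1.], matching Y

#compare with the closed-form least-squares solution
W_ls = np.linalg.lstsq(X, Y, rcond=None)[0]
print(W_ls)                          #approximately [-0.5, 1.0, 1.0]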

For a Linearly Inseparable Problem (XOR)

XOR cannot be separated by a line in the original (x1, x2) plane, so each sample is augmented with the nonlinear features x1^2, x1*x2, and x2^2. A linear neuron in this six-dimensional feature space then corresponds to a quadratic decision boundary in the original plane, as sketched below.
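
For reference, this is the feature map used to build X below, written out as a function (a sketch; phi is not in the original post):

def phi(x1, x2):
    #map (x1, x2) to the feature vector [1, x1, x2, x1^2, x1*x2, x2^2]
    return np.array([1, x1, x2, x1**2, x1*x2, x2**2])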

#input (with the non-linear features); columns are [1, x1, x2, x1^2, x1*x2, x2^2]
X = np.array([[1,0,0,0,0,0],
              [1,0,1,0,0,1],
              [1,1,0,1,0,0],
              [1,1,1,1,1,1]])
#labels
Y = np.array([-1,1,1,-1])
#weight vector, randomly initialized
W = np.random.random(6)
#learning rate
lr = 0.01

#single neuron trained with the delta rule, as above
def update():
    global X,Y,W,lr
    A = np.dot(X,W)           #linear activation: A = XW
    W += lr*np.dot(Y-A,X)     #batch gradient step on the squared error

#train for 1000 epochs
for _ in range(1000):
    update()

#positive samples: (0,1) and (1,0)
x1 = [0,1]
y1 = [1,0]
#negative samples: (0,0) and (1,1)
x2 = [0,1]
y2 = [0,1]

xdata = np.linspace(-0.5,1.5)

#decision boundary: W0 + W1*x + W2*y + W3*x^2 + W4*x*y + W5*y^2 = 0,
#solved as a quadratic in y for each x; where the discriminant is negative,
#np.sqrt returns NaN and matplotlib simply skips those points
def get_root(W,x):
    a = W[5]
    b = W[2]+W[4]*x
    c = W[0]+W[1]*x+W[3]*x*x
    d = np.sqrt(b*b-4*a*c)
    return ((-b+d)/(2*a),(-b-d)/(2*a))

plt.figure()
upper, lower = get_root(W, xdata)   #evaluate both roots once
plt.plot(xdata, upper, 'k-')
plt.plot(xdata, lower, 'k-')
plt.plot(x1,y1,'yo')
plt.plot(x2,y2,'go')
plt.show()


(Figure: the two branches of the learned quadratic boundary separating the XOR samples.)
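
Again as a sanity check (a sketch, not in the original post), the thresholded outputs should match the XOR labels after training. Because the four feature vectors are linearly independent, gradient descent can interpolate the targets, although the exact weights depend on the random initialization:

#sanity check: the thresholded outputs should match the XOR labels
print(np.sign(np.dot(X,W)))    #expected: [-1.  1.  1. -1.]
print(Y - np.dot(X,W))         #residuals; should be small (train longer if not)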


Reposted from blog.csdn.net/u012841922/article/details/78959153