Python Neural Network

How the neural network works is spelled out in the program itself, through the comments.

Step 1: build a neural network class:

import numpy as np

# tanh activation function
def tanh(x):
    return np.tanh(x)
# derivative of tanh; a is the already-activated output, a = tanh(z), so tanh'(z) = 1 - a^2
def tanh_deri(a):
    return 1.0 - a * a
# logistic (sigmoid) activation function
def logistic(x):
    return 1 / (1 + np.exp(-x))
# derivative of the logistic function, likewise in terms of its output a = logistic(z)
def logistic_deri(a):
    return a * (1 - a)
class Neuralwork:
    def __init__(self, layers, activation='tanh'):
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_deri
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deri
        # the weights are kept in a list, one matrix per pair of adjacent layers
        self.weights = []
        for i in range(1, len(layers) - 1):
            # random weights in [-0.25, 0.25) into each hidden layer;
            # the +1 adds a bias unit to the sending and receiving layers
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
        # weights from the last hidden layer into the output layer (no bias unit on the output)
        self.weights.append((2 * np.random.random((layers[-2] + 1, layers[-1])) - 1) * 0.25)
    def train(self, x, y, learning_rate=0.2, number=10000):
        X = np.atleast_2d(x)
        temp = np.ones((X.shape[0], X.shape[1] + 1))    # temp has one more column than x
        temp[:, 0:-1] = X                               # copy x into every column of temp but the last
        X = temp                                        # the last column of X is the bias, fixed at 1
        y = np.array(y)
        for j in range(number):
            i = np.random.randint(X.shape[0])           # pick one training sample at random
            a = [X[i]]
            # forward pass; len(self.weights) is the number of layer-to-layer transitions
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            error = y[i] - a[-1]
            # error term of the output layer; a[-1] is the output activation
            deltas = [error * self.activation_deriv(a[-1])]
            # backward pass: propagate the error terms from the second-to-last layer down
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            # update the weights, starting from the first layer
            for k in range(len(self.weights)):
                layer = np.atleast_2d(a[k])
                delta = np.atleast_2d(deltas[k])
                self.weights[k] += learning_rate * layer.T.dot(delta)
    # prediction: a single forward pass through the trained weights
    def predict(self, x):
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)   # append the bias entry
        temp[0:-1] = x
        a = temp
        for l in range(len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
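
For reference, what train implements is stochastic gradient descent with standard backpropagation on the squared error. A sketch of the rule, written to match the code above (L is the output layer, η the learning rate, ⊙ elementwise multiplication):

$$\delta^{(L)} = \big(y - a^{(L)}\big) \odot f'\big(a^{(L)}\big)$$
$$\delta^{(l)} = \big(\delta^{(l+1)}\,(W^{(l)})^{\top}\big) \odot f'\big(a^{(l)}\big), \qquad l = L-1,\dots,1$$
$$W^{(l)} \mathrel{+}= \eta\,\big(a^{(l)}\big)^{\top}\,\delta^{(l+1)}$$

Note that f' is evaluated at the layer output a = f(z) rather than at z, which is why tanh_deri and logistic_deri above are written in terms of the activated value.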





Step 2: test it:

import numpy as np
from 神经网络 import Neuralwork   # the class above, saved as 神经网络.py

# learn XOR, the classic test problem that is not linearly separable
nn = Neuralwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.train(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
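
Because the output unit is a tanh, predict returns continuous values near 0 and 1 rather than exact class labels. A minimal follow-up sketch for turning them into hard XOR labels (the 0.5 cutoff is an assumption, not part of the original code):

for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    out = nn.predict(i)                          # array of shape (1,)
    print(i, out, 'label:', int(out[0] > 0.5))   # threshold at 0.5 (assumed)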



Reposted from blog.csdn.net/qq_23859701/article/details/78998044