Build a neural network with NumPy in Python

Python version: 2.7

I haven't written much explanation for the time being; make a backup, then copy and paste the code into a .py file to run it.

Build a neural network in Python (without using machine learning libraries)
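Reading the code, the network has 3 inputs, a first hidden layer of 2 units, a second hidden layer of 3 units, and 2 outputs; the weight matrices w (2x3), v (3x2) and u (2x3) connect consecutive layers, every layer uses a sigmoid activation, and the error recorded per sample is the sum of squared differences between the output and the target. All of the delta computations in the script rely on the sigmoid derivative s'(z) = s(z)(1 - s(z)). The short snippet here is only a sketch of that identity and is not part of the script that follows:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    # s'(z) = s(z) * (1 - s(z))
    s = sigmoid(z)
    return s * (1.0 - s)

# compare the analytic derivative with a central-difference approximation
z = np.array([-2.0, 0.0, 1.5])
numeric = (sigmoid(z + 1e-6) - sigmoid(z - 1e-6)) / 2e-6
print sigmoid_prime(z)   # analytic derivative
print numeric            # numerical approximation; the two should agree closely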

# coding:utf-8
import numpy as np
from matplotlib import pyplot as plt
def divDebugWindow():
    print('==================================== Mysterious dividing line ====================================\n')
def deBugout(nameofVAR,VAR):
    print (nameofVAR, VAR, 'shape of', nameofVAR, 'is', np.shape(VAR))
def datafile():
    x1=np.array([29,43,50,36,2,29,94,45,83,93,41,76,39,91,45,28,28,39,57,72,12,38,52,83,32,15,100,49,83,93,50,45,72,56,36,43,11,23,67,67,2,86,14,92,51,52,94,55,87,40,14,30,24,22,17,22,7,43,71,23,32,33,53,32,27,94,19,54,76,50,67,53,2,45,73,98,19,15,9,24,64,51,62,86,58,32,53,45,52,85,84,29,83,67,91,45,71,8,76,99]);
    x2=np.array([54,47,65,79,56,80,55,65,85,67,67,42,45,56,21,67,72,50,33,2,\
                 81,55,66,13,30,63,55,7,39,19,61,43,35,69,45,69,39,83,50,60,\
                 44,10,56,99,59,17,66,68,41,83,26,42,79,0,14,48,44,96,65,37,\
                 61,85,72,40,24,82,0,44,58,35,43,28,67,58,37,28,34,40,61,45,\
                 22,28,60,91,81,51,83,75,97,91,58,83,68,57,55,38,87,59,100,80]);
    x3=np.array([98,56,31,78,30,90,73,52,37,21,93,97,25,60,90,66,28,69,46,67,\
                 32,56,95,6,1,86,52,89,61,26,82,97,52,43,39,95,59,2,22,6,83,\
                 91,0,51,76,94,45,37,11,40,9,12,7,19,60,38,17,76,55,89,91,44,\
                 18,55,24,73,32,29,75,9,69,95,84,69,58,59,93,37,1,57,84,75,17,\
                 70,40,6,86,11,71,64,95,19,21,17,3,79,33,41,19,42]);
    y1=np.array([973,202,77,541,59,796,429,187,131,63,853,938,40,256,738,335,77,357,\
                 114,308,100,210,906,10,12,677,181,710,250,30,594,936,160,133,83,909,\
                 222,71,42,43,591,763,33,240,479,839,144,102,27,137,9,22,65,9,220,80,\
                 25,535,216,721,794,161,63,186,22,466,35,49,463,18,354,871,638,367,216,\
                 223,818,68,38,208,604,435,47,434,135,29,710,62,457,353,899,79,64,44,\
                 39,512,119,105,114,148]);
    y2=np.array([63,107,170,117,34,97,868,139,648,851,123,466,82,791,105,73,77,91,201,\
                 380,71,91,194,574,42,52,1035,127,593,811,170,119,391,228,71,137,22,81,\
                 328,337,28,646,34,882,175,153,879,216,676,137,10,46,77,13,13,37,21,179,\
                 406,35,79,113,203,54,28,905,10,180,480,138,326,166,53,132,409,955,28,23,\
                 38,40,275,148,276,726,265,59,226,148,242,703,636,95,620,335,784,113,\
                 437,39,541,1038]);
    print('shape x1:')
    print np.shape(x1)
    print('shape x2:')
    print np.shape(x2)
    print('shape x3:')
    print np.shape(x3)
    print('shape y1:')
    print np.shape(y1)
    print('shape y2:')
    print np.shape(y2)
    s1=np.zeros((1,np.size(y1)))
    s2=np.zeros((1,np.size(y1)))
    # for i in range(100):
    # s1[i]=(1.0/(1.0+np.exp(-y1[i])));#scale the output to the (0,1) range
    #     s2[i]=(1.0/(1.0+np.exp(-y2[i])));
    s1 = (1.0/(1.0+np.exp(-y1)));
    s2 = (1.0 / (1.0 + np.exp(-y2)));
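    # s1, s2: the raw targets y1, y2 are squashed into (0,1) with a sigmoid so they match the sigmoid output layer; for targets this large the scaled values saturate very close to 1.0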
    # data=np.array([[x1],[x2],[x3],[s1],[s2]]);
    # x = np.sta
    print 'Create data set "data" succeeded\n'
    return np.array([[x1],[x2],[x3],[s1],[s2]]);
#=====================================#=====================================#=====================================#==============================
# a=datafile();
# print a;
# function [sigma]=BP_BACK(training_example,eta)

def BP_BACK(training_example,eta):
    # np.random.seed(1)

    ssize=np.array(np.shape(training_example)) ;
    print ssize
    m=ssize[0]; n=ssize[2];
    print m
    print n
    # sigma=np.zeros(1,n);

    sigma=np.array(range(n),np.float64);
    # % m -- rows, n -- number of samples (columns)
    # % Initialize the weight matrices in the range -0.5 ~ 0.5
    w = np.random.rand(2,3) - 0.5;
    v = np.random.rand(3,2) - 0.5;
    u = np.random.rand(2,3) - 0.5;
    # %------------------------
    divDebugWindow()
    print('Initialize weight wvu end\n')
        # for num = 1:n % for each group of input and output
    for num in range(n):
        # forward pass
        print num
        one_sample = training_example[:,0,num];
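        # training_example has shape (5, 1, 100): rows are x1, x2, x3, s1, s2, so [:, 0, num] picks out one sample as [x1, x2, x3, s1, s2]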

        print ('onesample_shape:')
        print (np.shape(one_sample))
        print one_sample
        x = one_sample[0:3]
        print('shape  x: ')
        print (np.shape(x))
        y = one_sample[3:5]
        print('shape  y: ')
        print np.shape(y)
        print('shape  w: ')
        print np.shape(w)

        print ('begin w*x')
        # print ('x',x,'w',w)
        deBugout('x',x)
        deBugout ('w', w)
        net2 = np.dot(x,w.T)
        deBugout ('net2:', net2)
        # net2 = np.dot(w,np.tile(np.transpose(np.transpose([x])),(3,2)));


        # print('Finish-----np.dot(w,np.transpose(x))')
        # print ('net2')
        # print net2
        # for i in range(2):
        #     hidden1(i)=1/(1+np.exp(-net2[i]));
        hidden1 = 1 / (1 + np.exp(-net2));
        divDebugWindow()
        # print hidden1
        # print ('size v ',np.shape(v))
        # w = np.random.rand (2, 3) - 0.5;
        # v = np.random.rand (3, 2) - 0.5;
        # u = np.random.rand(2, 3) - 0.5;

        deBugout('hidden1',hidden1)
        deBugout('v',v)
        net3 = np.dot(hidden1,v.T)
        # net3 = np.dot(v,np.transpose(hidden1));
        deBugout ('net3', net3)
        divDebugWindow() # =================================================
        deBugout ('u', u)
        hidden2 = 1 / (1 + np.exp(-net3));

        # print ('hidden2',hidden2)
        deBugout('hidden2',hidden2)
        net4 = np.dot(hidden2, u.T);
        deBugout ('net4', net4)
        divDebugWindow() # =================================================
        o = 1 / (1 + np.exp (-net4))
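        # forward pass complete: x (3 inputs) -> hidden1 (2 units) -> hidden2 (3 units) -> o (2 outputs); each layer is sigmoid(previous_layer . W^T)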
        deBugout ('o', o)
        # %-------------Backpropagation: compute the delta value of each layer (the derivative of the error E with respect to each layer's weights)-------------
        delta3 = (y - o) * o * (1 - o);  # % calculation formula
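        # delta3: output-layer error term -- (target - output) multiplied by the sigmoid derivative o * (1 - o)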
        deBugout ('delta3', delta3)
        divDebugWindow() # =================================================

        # delta2(j) = hidden2(j) * (1 - hidden2(j)) * delta3 * u(:, j);
        # print ('u', u)

        deBugout ('u', u)
        # delta2=1
        deBugout('delta3afterCut', delta3[0:np.size(hidden2)])
        deBugout('hidden2',hidden2)
        deBugout ('u', u)
        # deBugout('hidden2 * (1 - hidden2 )* delta3[0:np.size(hidden2)]',hidden2 * (1 - hidden2 )* delta3[0:np.size(hidden2)])
        divDebugWindow() # =================================================
        # delta2 = np.dot(hidden2 * (1 - hidden2 )* delta3[0:np.size(hidden2)],u);
        # deBugout('delta3[0:np.size(hidden2)]',delta3[0:np.size(hidden2)])
        deBugout ('delta3', delta3)
        deBugout('hidden2',hidden2)
        delta2 = hidden2[0:2] * (1 - hidden2[0:2])*delta3;
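        # delta2: error term for hidden layer 2 (first two units), sigmoid derivative times delta3; unlike the commented-out MATLAB formula, the u weights are not folded in here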
        # for j = 1:3 % The calculation formula is related to the delta value of the next layer
        # delta2(j) = hidden2(j) * (1 - hidden2(j)) * delta3 * u(:, j);
        # end
        deBugout ('delta2', delta2);
        # for j in range(3):
        print ('Finished computing delta2.......')
        divDebugWindow()# =================================================
        print ('beginning computing delta1.......')
        #     delta2[j] = hidden2[j] * (1 - np.dot(hidden2[j]) * delta3,u[:, j]);
        #
        deBugout('hidden1',hidden1)
        deBugout ('delta2', delta2)
        deBugout('v',v)
        deBugout('v[0:2,:]',v[0:2,:])
        deBugout('hidden1[0:2]*(1-hidden1[0:2])*delta2[0:2]',hidden1[0:2]*(1-hidden1[0:2])*delta2[0:2])
        print (np.shape([hidden1[0:2]*(1-hidden1[0:2])*delta2[0:2]]))
        delta1 = np.dot(hidden1[0:2]*(1-hidden1[0:2])*delta2[0:2],v[0:2,:])
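        # delta1: error term for hidden layer 1 -- sigmoid derivative of hidden1 times delta2, pushed back through the first two rows of v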
        print ('Finished computing delta1.......')
        deBugout ('delta1', delta1)
        divDebugWindow()  # =================================================

        # delta1(k) = hidden1(k) * (1 - hidden1(k)) * delta2 * v(:, k);
        # delta1 =1
        # for k in range(2):
        #     delta1[k] = hidden1[k] * np.dot((1 - hidden1[k]) * delta2 ,v[:, k]);
        print ('Beginning to update the weights.......')
        # %-------After the delta of each layer is calculated, start to update the weights -----------
        # %---Update u weights-----
        deBugout ('u', u)
        deBugout ('delta3', delta3)
        deBugout('hidden2',hidden2)
        # u = u + eta * delta3 * hidden2;

        for i in range(2):
            for j in range(3):
                u[i,j] = u[i,j] + eta*delta3[i]*hidden2[j];
        deBugout('u_after', u)
        # %---Update v weights-----
        print ('Finished updating u.........')
        divDebugWindow()  # =================================================

        deBugout ('delta2', delta2)
        deBugout('hidden1',hidden1)
        for i in range(2):
            for j in range(2):
                v[i,j] = v[i,j] + eta*delta2[i]*hidden1[j];
        deBugout('v_after', v)
        print ('finished updating v.........')
        divDebugWindow()  # =================================================
        # %---Update w weights-----
        print ('Beginning to update w..........')
        deBugout ('w', w)
        deBugout ('delta1', delta1)
        deBugout('x', x)
        for i in range(2):
            for j in range(2):
                w[i, j] = w[i, j] + eta * delta1[i] * x[j];
        deBugout('w_after', w)
        print ('Finished updating w.........')
        divDebugWindow()  # =================================================
        # %------------- record the error value for this sample
        deBugout ('o', o)
        deBugout('y', y)
        e = o - y;  # % calculate the error vector (computed output - target output)
        deBugout('e', e)
        ee = np.sum(np.dot(np.transpose(e), e))
        deBugout('ee', ee)
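        # ee: squared error of this sample (e is 1-D, so the dot product is just the sum of squared components)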
        print (ee)
        # deBugout('y', y)
        # deBugout('err', e)
        # e=;
        print('updating sigma for sample', num)
        # sigma[num] = np.dot(np.transpose(e),e);#% Calculate the sum of squares of errors
        # sigma [num] = ee;
        deBugout('sigma',sigma)
        # plt.plot(sigma);
        # sigma = 0
        # print sigma
        # np.append(sigma,ee);
        sigma[num] = ee;
        print ('Finished updating Sigma')
        divDebugWindow()
    return sigma
        #

print('The main program starts......');
training_example=datafile();

print(training_example)
# pic=plt.plot(training_example)
# pic.s
print ('Shape of training samples:');
print np.shape(training_example)

print('Start using backpropagation algorithm')
#===============================
plotSigma=BP_BACK(training_example,0.9)
#===============================
print ('BPFINISHED')
plt.plot (plotSigma)

plt.xlabel('ITERATION')
plt.ylabel('LOSS')
plt.title('LOSS GRAPH')
plt.grid ()
plt.show()

