Machine Learning Exercise 3 — Multi-class Classification and Neural Networks, implemented in Python (NumPy).

The full problem statement is too long to reproduce here — download it via the [portal] link.

Question 1

Description: recognize the handwritten digits shown in the images.

import numpy as np
import scipy.io as scio
import matplotlib.pyplot as plt
import scipy.optimize as op
   . 5  
  . 6  # display image data 
  . 7  DEF displayData (X-):
   . 8      m = np.size (X-, 0)   # number of rows of X, i.e., the number of samples 
  . 9      n-np.size = (X,. 1)   # number of columns of X, i.e., a single sample size 
10      example_width = int (np.round (np.sqrt (n-)))   # single image width 
. 11      example_height = int (np.floor (n-/ example_width))   # single image height 
12     int = display_rows (np.floor (np.sqrt (m)))   # shown in the figure, how many line in FIG. 
13 is      display_cols = int (np.ceil (m / display_rows))   # shown in the figure, a number pictures 
14      PAD = 1   # interval between image 
15      display_array = - np.ones ((+ display_rows * PAD (example_height + PAD),
 16                              PAD * + display_cols (example_width + PAD)))   # initialization image matrix 
. 17      curr_ex = 0   # current picture count 
18      # to each thumbnail image is inserted into the array 
. 19      for J in Range (0, display_rows):
 20 is          for I in range(0, display_cols):
 21             if curr_ex >= m:
 22                 break
 23             max_val = np.max(abs(X[curr_ex, :]))
 24             jstart = pad + j * (example_height + pad)
 25             istart = pad + i * (example_width + pad)
 26             display_array[jstart: (jstart + example_height), istart: (istart + example_width)] = \
 27                 np.array(X[curr_ex, :]).reshape(example_height, example_width) / max_val
 28             curr_ex = curr_ex + 1
 29         if curr_ex >= m:
 30             break
 31     display_array = display_array.T
 32     plt.imshow(display_array,cmap=plt.cm.gray)
 33     plt.axis('off')
 34     plt.show()
 35 
 36 #计算hθ(z)
 37 def sigmoid(z):
 38     g = 1.0 / (1.0 + np.exp(-z))
 39     return g
 40 
 41 #计算cost
 42 def lrCostFunction(theta, X, y, lamb):
 43     theta = np.array(theta).reshape((np.size(theta), 1))
 44     m = np.size(y)
 45     h = sigmoid(np.dot(X, theta))
 46     J = 1 / m * (-np.dot(y.T, np.log(h)) - np.dot((1 - y.T), np.log(1 - h)))
 47     theta2 = theta[1:, 0]
 48     Jadd = lamb / (2 * m) * np.sum(theta2 ** 2)
 49     J = J + Jadd
 50     return J.flatten()
 51 
 52 #计算梯度
 53 def gradient(theta, X, y, lamb):
 54     theta = np.array(theta).reshape((np.size(theta), 1))
 55     m = np.size(y)
 56     h = sigmoid(np.dot(X, theta))
 57     grad = 1/m*np.dot(X.T, h - y)
 58     theta[0,0] = 0
 59     gradadd = lamb/m*theta
 60     grad = grad + gradadd
 61     return grad.flatten()
 62 
 63 #θ计算
 64 def oneVsAll(X, y, num_labels, lamb):
 65     m = np.size(X, 0)
 66     n = np.size(X, 1)
 67     all_theta = np.zeros((num_labels, n+1))
 68     one = np.ones(m)
 69     X = np.insert(X, 0, values=one, axis=1)
 70     for c in range(0, num_labels):
 71         initial_theta = np.zeros(n+1)
 72         y_t = (y==c)
 73         result = op.minimize(fun=lrCostFunction, x0=initial_theta, args=(X, y_t, lamb), method='TNC', jac=gradient)
 74         all_theta[c, :] = result.x
 75     return all_theta
 76 
 77 #计算准确率
 78 def predictOneVsAll(all_theta, X):
 79     m =np.size (X-, 0)
 80      num_labels = np.size (all_theta, 0)
 81      P = np.zeros ((m,. 1))   # is used to save the maximum of each row 
82      G = np.zeros ((NP .size (X-, 0), num_labels))   # is used to save the results of each classification (classification total of 10 times, each time to save a) 
83      One = np.ones (m)
 84      X-= np.insert (X-, 0, values One =, = Axis. 1 )
 85      for C in Range (0, num_labels):
 86          Theta = all_theta [C,:]
 87          G [:, C] = Sigmoid (np.dot (X-, Theta .T))
 88      P = g.argmax (Axis =. 1 )
 89      #Print (P) 
90      return p.flatten ()
 91 is  
# load the data file
data = scio.loadmat('ex3data1.mat')
X = data['X']
y = data['y']
# the MATLAB data labels digit 0 as class 10; modulo maps 10 back to 0
y = y % 10
m = np.size(X, 0)
# show 100 randomly chosen training images
rand_indices = np.random.randint(0, m, 100)
sel = X[rand_indices, :]
displayData(sel)

# compute theta
lamb = 0.1
num_labels = 10
all_theta = oneVsAll(X, y, num_labels, lamb)
# print(all_theta)

# compute prediction accuracy
pred = predictOneVsAll(all_theta, X)
# np.set_printoptions(threshold=np.inf)
# pred is already a 1-D vector; flatten y too so the comparison is elementwise
acc = np.mean(pred == y.flatten()) * 100
print('Training Set Accuracy:', acc, '%')

 

operation result:

 

 

Problem 2

Description: digit recognition with a neural network (pre-trained weights Θ are provided).

import numpy as np
import scipy.io as scio
import matplotlib.pyplot as plt
import scipy.optimize as op
 . 5  
# display image data
def displayData(X):
    """Tile the rows of X into a single grid image and show it.

    X: (m, n) array; each row is one flattened grayscale image.
    Reconstructed from a garbled listing — the loop body below was intact
    and fixes the intended layout math.
    """
    m = np.size(X, 0)   # number of rows of X, i.e. the number of samples
    n = np.size(X, 1)   # number of columns of X, i.e. a single sample size
    example_width = int(np.round(np.sqrt(n)))           # single image width
    example_height = int(np.floor(n / example_width))   # single image height
    display_rows = int(np.floor(np.sqrt(m)))            # grid rows
    display_cols = int(np.ceil(m / display_rows))       # grid columns
    pad = 1   # interval between images
    # canvas initialized to -1 so the padding renders dark
    display_array = - np.ones((pad + display_rows * (example_height + pad),
                               pad + display_cols * (example_width + pad)))
    curr_ex = 0   # current picture count
    # insert each thumbnail image into the array
    for j in range(0, display_rows):
        for i in range(0, display_cols):
            if curr_ex >= m:
                break
            # normalize each thumbnail by its own max absolute value
            max_val = np.max(abs(X[curr_ex, :]))
            jstart = pad + j * (example_height + pad)
            istart = pad + i * (example_width + pad)
            display_array[jstart: (jstart + example_height), istart: (istart + example_width)] = \
                np.array(X[curr_ex, :]).reshape(example_height, example_width) / max_val
            curr_ex = curr_ex + 1
        if curr_ex >= m:
            break
    # transpose — presumably because the .mat data is column-major (MATLAB); TODO confirm
    display_array = display_array.T
    plt.imshow(display_array, cmap=plt.cm.gray)
    plt.axis('off')
    plt.show()
35 
# hypothesis h_theta(z): logistic sigmoid
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^(-z))."""
    return 1.0 / (1.0 + np.exp(-z))
40 
# neural-network forward pass
def predict(theta1, theta2, X):
    """Predict digit labels with a 3-layer network given trained weights.

    theta1: weights input->hidden; theta2: weights hidden->output
    (both include the bias column). Returns a 1-D array of labels in 1..10.
    Reconstructed from a garbled listing; the dead `p = np.zeros(...)`
    preallocation was removed (p is rebound by argmax).
    """
    m = np.size(X, 0)
    # hidden layer: add bias column, then a2 = sigmoid(X @ theta1.T)
    one = np.ones(m)
    X = np.insert(X, 0, values=one, axis=1)
    a2 = sigmoid(np.dot(X, theta1.T))
    # output layer: add bias to a2, then a3 = sigmoid(a2 @ theta2.T)
    one = np.ones(np.size(a2, 0))
    a2 = np.insert(a2, 0, values=one, axis=1)
    a3 = sigmoid(np.dot(a2, theta2.T))
    # argmax yields 0..9; +1 because y values in the data run 1..10
    p = a3.argmax(axis=1) + 1
    return p.flatten()
 55  
# read data file
data = scio.loadmat('ex3data1.mat')
X = data['X']
y = data['y']
m = np.size(X, 0)
rand_indices = np.random.randint(0, m, 100)
sel = X[rand_indices, :]
# displayData(sel)

# load the pre-trained network weights
theta = scio.loadmat('ex3weights.mat')
theta1 = theta['Theta1']
theta2 = theta['Theta2']

# forecast accuracy on the whole training set
pred = predict(theta1, theta2, X)
acc = np.mean(pred == y.flatten()) * 100
print('Training Set Accuracy:', acc, '%')

# identify single random images interactively until the user quits
for i in range(0, m):
    it = np.random.randint(0, m, 1)
    it = it[0]
    displayData(X[it: it + 1, :])
    pred = predict(theta1, theta2, X[it: it + 1, :])
    print('Neural Network Prediction:', pred)
    print('input q to exit:')
    cin = input()
    if cin == 'q':
        break

Matrix representation of the neural network:

operation result:

 

Guess you like

Origin www.cnblogs.com/orangecyh/p/11700022.html