Python neural network programming code, neural network code implementation

1. Hello, could you share the code for a neural network? I saw that you answered this question before. Thank you.

// BP.cpp : Defines the entry point for the console application.
// This program implements the BP (back-propagation) algorithm for a neural network. The number of
// input nodes, output nodes, layers and nodes per layer are all chosen by the user.
// "Number of layers" here means every layer after the input layer, including the output layer.
// For example, for XOR the network has 2 layers: the first layer has 2 nodes and the second layer,
// i.e. the output layer, has 1 node, with 2 input nodes.
// The program is not ideal for XOR itself; it gives better results for larger multi-layer,
// multi-node networks. (A sample console session for the XOR setup is shown after the program.)
#include "stdafx.h"   // precompiled header of the original Visual C++ console project
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cmath>
using namespace std;

#define MAXCOUNT 1e5 // upper limit on the number of training iterations (10^5)

// Random floating-point number with 0.001 resolution in the range [-0.5, 0.5].
// rand() % 1001 gives 0..1000; multiplying by 0.001f maps this to 0..1, and subtracting 0.5 shifts it to -0.5..0.5.
float randf()
{
    return (float)((rand() % 1001) * 0.001f - 0.5);
}

// Gaussian random number with mean 0.0 and variance 1.0 (polar Box-Muller method).
// For mean E and variance V, use X = X * V + E.
double gaussrand()
{
    static double V1, V2, S;
    static int phase = 0;
    double X;
    if (phase == 0) {
        do {
            double U1 = (double)rand() / RAND_MAX;
            double U2 = (double)rand() / RAND_MAX;
            V1 = 2 * U1 - 1;
            V2 = 2 * U2 - 1;
            S = V1 * V1 + V2 * V2;
        } while (S >= 1 || S == 0);
        X = V1 * sqrt(-2 * log(S) / S);
    } else {
        X = V2 * sqrt(-2 * log(S) / S);
    }
    phase = 1 - phase;
    return X;
}

// A multi-layer feed-forward BP network
class BP
{
public:
    double ***p;       // all weights
    double ***ddp;     // all weight increments
    int *pnode;        // number of nodes in each layer
    double **pnodey;   // output value of each node in each layer
    double **ddlj;     // error term (delta) of each node in each layer
    double **pX;       // input samples
    double **pY;       // ideal (target) output values
    int Sidenum;       // number of layers (hidden layers plus the output layer)
    int Inputnodenum;
    int outputnodenum;
    int yangbenzushu;  // number of sample groups

    BP()
    {
        Sidenum = 0;
        Inputnodenum = 0;
        outputnodenum = 0;
        yangbenzushu = 0;
        // initialize the pointers so the destructor and freeINput() are safe before getW()/INPUT()
        p = NULL; ddp = NULL; pnode = NULL;
        pnodey = NULL; ddlj = NULL; pX = NULL; pY = NULL;
    }

    ~BP()
    {
        for (int m = 0; m < Sidenum; m++) {
            for (int n = 0; n < pnode[m + 1]; n++) {
                delete[] p[m][n];
                delete[] ddp[m][n];
            }
            delete[] p[m];
            delete[] ddp[m];
        }
        delete[] p;
        delete[] ddp;
        p = NULL;
        ddp = NULL;
        delete[] pnode;
        for (int M = 0; M < Sidenum; M++) {
            delete[] pnodey[M];
            delete[] ddlj[M];
        }
        delete[] pnodey;
        delete[] ddlj;
        pnodey = NULL;
        ddlj = NULL;
    }

    // Allocate and initialize all weights
    void getW(int sidenum, int inputnodenum, int outputnodenum1, int yangbenzu)
    {
        Sidenum = sidenum;
        yangbenzushu = yangbenzu;      // number of sample groups
        Inputnodenum = inputnodenum;
        outputnodenum = outputnodenum1;
        p = new double**[sidenum];     // all weights
        ddp = new double**[sidenum];   // weight increments
        pnode = new int[sidenum + 1];  // node count of every layer, including the input layer
        for (int i = 0; i < sidenum + 1; i++) {
            int data = 0;
            cout << "Please enter the number of nodes in layer " << i << endl;
            cin >> data;
            pnode[i] = data;
        }
        // Initialize the weights. Layer 0 of p is the first hidden layer; the input layer has no weights.
        for (int j = 0; j < sidenum; j++) {
            p[j] = new double*[pnode[j + 1]];   // one weight array per node of layer j+1
            ddp[j] = new double*[pnode[j + 1]]; // same layout for the increments
            for (int k = 0; k < pnode[j + 1]; k++) {
                ddp[j][k] = new double[pnode[j] + 1]; // weights feeding node k of layer j (+1 for the threshold)
                p[j][k] = new double[pnode[j] + 1];
                for (int t = 0; t < pnode[j] + 1; t++) {
                    ddp[j][k][t] = 0;                // all weight increments start at 0
                    if (t == 0)
                        p[j][k][t] = -fabs(randf()); // element 0 of each node holds the threshold (bias)
                    else
                        p[j][k][t] = randf();        // the remaining weights are small random values
                }
            }
        }
        // Allocate memory for the node outputs and for ddlj
        pnodey = new double*[Sidenum]; // Sidenum layers in total
        ddlj = new double*[Sidenum];
        for (int q = 0; q < Sidenum; q++) {
            pnodey[q] = new double[pnode[q + 1] + 1]; // node count + 1 outputs per layer
            ddlj[q] = new double[pnode[q + 1]];
            pnodey[q][0] = 1; // the first output of every layer is fixed to 1; it is multiplied by the
                              // threshold, which is why each layer stores one extra output
        }
    }

    /*************************************************/
    // Node activation function (sigmoid)
    double fas(double s)
    {
        return 1.0 / (exp(-s) + 1);
    }
    /*************************************************/
    // Read the sample inputs and the ideal outputs
    void INPUT(int yangbenzushu1)
    {
        pY = new double*[yangbenzushu1]; // yangbenzushu1 ideal outputs
        pX = new double*[yangbenzushu1]; // yangbenzushu1 samples
        for (int yu = 0; yu < yangbenzushu1; yu++) {
            pX[yu] = new double[Inputnodenum + 1];  // sample dimension = number of inputs + 1
            pY[yu] = new double[outputnodenum + 1]; // output dimension = number of outputs + 1
        }
        // The first value of every sample is set to 1 so the subscripts line up with the threshold
        for (int yu1 = 0; yu1 < yangbenzushu1; yu1++) {
            pX[yu1][0] = 1;
            pY[yu1][0] = 1;
        }
        cout << "Please enter the sample input values" << endl;
        for (int yuy = 0; yuy < yangbenzushu1; yuy++)
            for (int yy = 1; yy <= Inputnodenum; yy++) {
                if (yy == Inputnodenum) cout << endl;
                cout << "X[" << yuy << "][" << yy << "]=" << ' ';
                cin >> pX[yuy][yy];
            }
        cout << "Please enter the ideal output values of the samples" << endl;
        for (int yuy1 = 0; yuy1 < yangbenzushu1; yuy1++)
            for (int yy1 = 1; yy1 <= outputnodenum; yy1++) {
                cout << "Y[" << yuy1 << "][" << yy1 << "]=" << ' ';
                cin >> pY[yuy1][yy1];
            }
    }
    /*************************************************/
    // Forward pass: compute the output of every node for sample group KK
    // and return the squared error of that sample.
    double computeYl(int KK)
    {
        double sum1 = 0;
        // Compute the outputs of all layers and record them in pnodey (the input values themselves are not stored there)
        for (int y = 0; y < Sidenum; y++) {              // layer index
            for (int r = 1; r < pnode[y + 1] + 1; r++) { // node index within the layer (starts at 1)
                double sum = 0;
                for (int z = 0; z < pnode[y] + 1; z++) { // nodes of the previous layer, plus the threshold input
                    if (y == 0) sum += pX[KK][z] * p[y][r - 1][z];
                    else        sum += pnodey[y - 1][z] * p[y][r - 1][z];
                }
                pnodey[y][r] = fas(sum);
            }
        }
        for (int j = 1; j <= outputnodenum; j++)
            sum1 += pow(pY[KK][j] - pnodey[Sidenum - 1][j], 2);
        return sum1;
    }

    /*************************************************/
    // Back-propagate the errors for sample group gf
    void ComputeBackPropagationErrors(int gf)
    {
        // Compute all ddlj[][], starting from the last layer
        for (int q = Sidenum - 1; q >= 0; q--) {
            if (q == Sidenum - 1) {
                // Output layer: delta = y * (1 - y) * (target - y)
                for (int rt = 0; rt < pnode[q + 1]; rt++)
                    ddlj[q][rt] = pnodey[q][rt + 1] * (1 - pnodey[q][rt + 1])
                                * (pY[gf][rt + 1] - pnodey[q][rt + 1]);
            } else {
                // Hidden layers: delta = y * (1 - y) * (sum of downstream deltas weighted by the connecting weights)
                for (int ry = 0; ry < pnode[q + 1]; ry++) {
                    double sumtemp = 0;
                    for (int fg = 0; fg < pnode[q + 2]; fg++)
                        sumtemp += ddlj[q + 1][fg] * p[q + 1][fg][ry + 1];
                    ddlj[q][ry] = pnodey[q][ry + 1] * (1 - pnodey[q][ry + 1]) * sumtemp;
                }
            }
        }
        // Compute all ddp[][][]
        for (int l = 0; l < Sidenum; l++)                 // layer index
            for (int JJ = 0; JJ < pnode[l + 1]; JJ++)     // node index within the layer
                for (int i = 0; i < pnode[l] + 1; i++) {  // nodes of the previous layer (plus threshold)
                    if (l == 0) // for the first layer the inputs are the sample values
                        ddp[l][JJ][i] = ddlj[l][JJ] * pX[gf][i];
                    else
                        ddp[l][JJ][i] = ddlj[l][JJ] * pnodey[l - 1][i];
                }
    }
    /*************************************************/
    // Update the weights using the BP algorithm (learning rate 0.2)
    void UpdatetheWeightsusingBPAlgorithm()
    {
        for (int cent = 0; cent < Sidenum; cent++)          // layer index
            for (int J = 0; J < pnode[cent + 1]; J++)       // node index within the layer
                for (int i = 0; i < pnode[cent] + 1; i++)   // nodes of the previous layer (plus threshold)
                    p[cent][J][i] += 0.2 * ddp[cent][J][i];
    }

    /*************************************************/
    // Training error over all sample groups
    double xunlianErrors()
    {
        double error = 0;
        double sum = 0;
        double temp = 0;
        for (int gf1 = 0; gf1 < yangbenzushu; gf1++) {
            temp = computeYl(gf1);
            //sum += zhengquelv(gf1);
            for (int jj = 1; jj <= outputnodenum; jj++)
                cout << pnodey[Sidenum - 1][jj];
            error += temp;
        }
        //sum = sum / yangbenzushu;
        cout << "Accuracy on the training set: " << sum << endl;
        return error / yangbenzushu;
    }
    /*************************************************/
    // Validation error over yangbenzushu1 sample groups
    double jiaoyanErrors(int yangbenzushu1)
    {
        double error = 0;
        double sum = 0;
        double temp = 0;
        for (int gf1 = 0; gf1 < yangbenzushu1; gf1++) {
            temp = computeYl(gf1);
            for (int jj = 1; jj <= outputnodenum; jj++)
                cout << pnodey[Sidenum - 1][jj];
            //sum += zhengquelv(gf1);
            error += temp;
        }
        //sum = sum / yangbenzushu1;
        //cout << "Accuracy on the validation set: " << sum << endl;
        return error / yangbenzushu1;
    }

    /*************************************************/
    // Classification accuracy for sample group KK: threshold each output and compare it with the target
    double zhengquelv(int KK)
    {
        int count = 0;
        double av = 0;
        for (int jj = 1; jj <= outputnodenum; jj++) {
            if (pnodey[Sidenum - 1][jj] > 0) pnodey[Sidenum - 1][jj] = 1;
            else                             pnodey[Sidenum - 1][jj] = 0;
            if (pY[KK][jj] == pnodey[Sidenum - 1][jj]) count++;
        }
        av = (double)count / outputnodenum;
        return av;
    }
    /*************************************************/
    // Free the sample arrays
    void freeINput()
    {
        if (pX != NULL) {
            for (int u = 0; u < yangbenzushu; u++) delete[] pX[u];
            delete[] pX;
            pX = NULL;
        }
        if (pY != NULL) {
            for (int u1 = 0; u1 < yangbenzushu; u1++) delete[] pY[u1];
            delete[] pY;
            pY = NULL;
        }
    }

    /*************************************************/
    // Print all weights
    void wputout()
    {
        for (int j = 0; j < Sidenum; j++) {
            cout << "The weights of layer [" << j + 1 << "] are: " << endl;
            for (int k = 0; k < pnode[j + 1]; k++) {
                for (int t = 0; t < pnode[j] + 1; t++) {
                    cout << p[j][k][t] << ' ';
                    if (t == pnode[j]) cout << endl;
                }
            }
        }
    }
    /*************************************************/
};

int main()
{
    BP bp;
    int count = 0; // counts the number of iterations used
    int inputnodenum, outnodenum, sidenum, yangbenzunum;
    double error;
    cout << "Please input: number of input nodes, number of output nodes, number of layers" << endl;
    cin >> inputnodenum >> outnodenum >> sidenum;
    cout << "Please enter the number of sample groups" << endl;
    cin >> yangbenzunum;
    // Step 1: initialize all weights
    bp.getW(sidenum, inputnodenum, outnodenum, yangbenzunum);
    // Step 2: read the sample groups
    bp.INPUT(yangbenzunum);
    for (;; count++) {
        double sum = 0;
        double temp = 0;
        for (int fuzu = 0; fuzu < yangbenzunum; fuzu++) {
            // Step 3: forward pass, compute all node outputs
            temp = bp.computeYl(fuzu);
            // Step 4: back-propagate the errors
            bp.ComputeBackPropagationErrors(fuzu);
            // Step 5: update the weights using the BP algorithm
            bp.UpdatetheWeightsusingBPAlgorithm();
            sum += temp;
        }
        // Step 6: check for convergence (average error over the sample groups)
        error = sum / (2 * yangbenzunum);
        //cout << count << ' ' << error << endl; // optional: log the error to the console or a file
        if (error < 1.02) {
            cout << "Converged. Number of iterations: " << count << endl;
            break;
        }
    }
    cout << "The weights are: " << endl;
    bp.wputout();
    double XUNLIANER = bp.xunlianErrors();
    //cout << "The training error is: " << XUNLIANER << endl;
    bp.freeINput(); // free the X/Y sample arrays
    /* Optional validation pass:
    cout << "Please input the verification samples: " << endl;
    int jiaoyannum = 0;
    cin >> jiaoyannum;
    bp.INPUT(jiaoyannum);
    double jiaoyanER = bp.jiaoyanErrors(jiaoyannum);
    cout << "The verification error is: " << jiaoyanER << endl;
    */
    return 0;
}



















2. What does QNN mean in Internet terms?

A quantized neural network (QNN) is a network that uses very low-precision (for example 1-bit) weights and activations at run time. During training, the quantized weights and activations are used to compute the parameter gradients. During the forward pass, a QNN greatly reduces memory size and memory accesses and replaces most arithmetic operations with bitwise operations, so power consumption is expected to drop substantially. QNNs have been trained on the MNIST, CIFAR-10, SVHN and ImageNet datasets, and the resulting networks achieve prediction accuracy comparable to their 32-bit counterparts. For example, a quantized version of AlexNet with 1-bit weights and 2-bit activations achieves 51% top-1 accuracy. The parameter gradients can also be quantized to 6 bits, which lets the gradient computation use only bitwise operations. Quantized recurrent neural networks tested on the Penn Treebank dataset reach accuracy comparable to 32-bit networks using only 4 bits. Last but not least, with a binary matrix multiplication GPU kernel, an MNIST QNN can run up to 7 times faster than with an unoptimized GPU kernel, with no loss of classification accuracy. The QNN code has been open-sourced.
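To make the "bitwise operations" idea concrete, here is a minimal MATLAB sketch of 1-bit quantization. It is my own illustration, not code from the QNN paper, and the sizes and variable names are arbitrary: weights and activations are binarized to +1/-1, so each dot product reduces to counting sign agreements, which hardware can implement with XNOR and popcount on bit-packed values.

% Minimal illustration of 1-bit quantization (not from the QNN paper)
W = randn(4, 8);                 % full-precision weights: 4 outputs, 8 inputs
x = randn(8, 1);                 % full-precision activations
Wb = sign(W);  Wb(Wb == 0) = 1;  % binarize weights to {-1, +1}
xb = sign(x);  xb(xb == 0) = 1;  % binarize activations to {-1, +1}
y = Wb * xb;                     % each entry = (#sign agreements) - (#disagreements);
                                 % with bit-packed {-1,+1} values this becomes XNOR + popcount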

3. Looking for a piece of neural network MATLAB code

function [presim, ss, net] = simnonlin(y, d, n)
% y -- time series data, column vector
% d -- time delay (embedding) parameter, positive integer
% n -- number of points to predict, positive integer
trainset = gettrain(y, d);          % build the training matrix (a sketch of gettrain is given after the call example)
inputs  = trainset(:, 1:end-1)';    % each column is one input vector of d lagged values
targets = trainset(:, end)';        % the value following each input vector
net = feedforwardnet(20, 'trainscg');
% net = newff(inputs, targets, 40);  % older-toolbox alternative
net = train(net, inputs, targets);
presim(1:d) = y(end-d+1:end);       % seed the prediction with the last d observed values
for i = d+1 : d+n
    presim(i) = sim(net, presim(i-d:i-1)');  % predict the next point from the previous d points
end
ss = presim(d+1:end)';
end

Call example:
t = (1:100)';
y = exp(-0.1*t).*sin(t);
d = 10;
n = 80;
sim = simnonlin(y, d, n);
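Note that simnonlin calls a helper gettrain that is not included in the answer. Below is a minimal sketch of what it might look like, assuming it builds a delay-embedding matrix whose first d columns are lagged values and whose last column is the value to predict; the name and layout are inferred from how simnonlin uses its result.

function trainset = gettrain(y, d)
% Hypothetical helper (not part of the original answer): build the training
% matrix for a time series. Row k is [y(k) ... y(k+d-1) y(k+d)], so the first
% d columns are the inputs and the last column is the target.
y = y(:);                          % make sure y is a column vector
N = length(y) - d;                 % number of training rows
trainset = zeros(N, d + 1);
for k = 1:N
    trainset(k, :) = y(k : k + d)';
end
end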

4. BP neural network prediction code

You are doing time-series prediction.
You can go to the "Neural Network Home" site (nnetinfo) and study Tutorial 2, "The application of neural networks to time series", where this is explained. The code is excerpted below.
% Time series: applying a neural network to a time series
% This code comes from "Neural Network Home"
timeList = 0 : 0.01 : 2*pi;        % generate the time points
X = sin(timeList);                 % generate the time-series signal
% Use x(t-5), x(t-4), x(t-3), x(t-2), x(t-1) as inputs to predict x(t), with x(t) as the output data
inputData = [X(1:end-5); X(2:end-4); X(3:end-3); X(4:end-2); X(5:end-1)];
outputData = X(6:end);
% Build the network from the input and output data (inputData, outputData).
% The number of hidden nodes is set to 3; the hidden and output transfer functions are tansig and
% purelin; training uses the trainlm method.
net = newff(inputData, outputData, 3, {'tansig', 'purelin'}, 'trainlm');
% Set some common parameters
net.trainParam.goal = 0.0001;   % training goal: mean squared error below 0.0001
net.trainParam.show = 400;      % show the result every 400 training iterations
net.trainParam.epochs = 1500;   % maximum number of training iterations: 1500
[net, tr] = train(net, inputData, outputData); % train the network with the toolbox train function
simout = sim(net, inputData);   % get the network's predictions with the toolbox sim function
figure;                         % open a new figure window
t = 1:length(simout);
plot(t, outputData, t, simout, 'r')  % compare the original output with the network's prediction
%------------------ Additional: extract the mathematical expression ------------------
% If you want to use the trained network without MATLAB's sim function, you can extract the
% mathematical expression, so that the output can be computed in any software from the expression alone.
%============== Extract the mathematical expression ==============
% Extract the network's weights and thresholds
w12 = net.iw{1,1};  % weights from layer 1 (input layer) to layer 2 (hidden layer)
b2  = net.b{1};     % thresholds of layer 2 (hidden layer)
w23 = net.lw{2,1};  % weights from layer 2 (hidden layer) to layer 3 (output layer)
b3  = net.b{2};     % thresholds of layer 3 (output layer)
% Because of the normalization, the normalization information must be captured first
iMax = max(inputData, [], 2);
iMin = min(inputData, [], 2);
oMax = max(outputData, [], 2);
oMin = min(outputData, [], 2);
% Method 1: normalize ---> compute the output ---> de-normalize
normInputData = 2*(inputData - repmat(iMin,1,size(inputData,2)))./repmat(iMax-iMin,1,size(inputData,2)) - 1;
tmp = w23*tansig(w12*normInputData + repmat(b2,1,size(normInputData,2))) + repmat(b3,1,size(normInputData,2));
myY = (tmp+1).*repmat(oMax-oMin,1,size(outputData,2))./2 + repmat(oMin,1,size(outputData,2));
% Method 2: compute with weights and thresholds converted back to the original data scale
% (for the formulas, see the article "Extracting the weights and thresholds corresponding to the original data")
W12 = w12 * 2 ./ repmat(iMax'-iMin', size(w12,1), 1);
B2 = -w12*(2*iMin./(iMax-iMin) + 1) + b2;
W23 = w23 .* repmat((oMax-oMin), 1, size(w23,2)) / 2;
B3 = (oMax-oMin).*b3/2 + (oMax-oMin)/2 + oMin;
% Final mathematical expression:
myY2 = W23*tansig(W12*inputData + repmat(B2,1,size(inputData,2))) + repmat(B3,1,size(inputData,2));
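A quick sanity check (my addition): under the default mapminmax normalization assumed above, both hand-computed outputs should agree with the toolbox prediction up to small numerical differences.

max(abs(simout - myY))    % method 1 vs. the sim output
max(abs(simout - myY2))   % method 2 vs. the sim output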


Origin blog.csdn.net/wenangou/article/details/127501576