PSO-BP neural network prediction: MATLAB code template (ready to run, no debugging required)

This code improves a traditional BP neural network with the particle swarm optimization (PSO) algorithm: the best position found by the swarm is used as the initial weights and thresholds of the network, which gives higher accuracy. Because gradient-descent backpropagation then starts from this optimized point, the training error is more likely to approach the global optimum.

The code supports single or multiple inputs and, correspondingly, single or multiple outputs.
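The script reads four spreadsheet files: input.xlsx and output.xlsx (training inputs and outputs) and input_test.xlsx and output_test.xlsx (test inputs and outputs), each with one row per sample and one column per feature or output. The sketch below shows one possible way to produce these files from a single dataset; the variables X and Y and the 80/20 split ratio are illustrative assumptions, not part of the original code.

%% (Optional) data preparation: split a full dataset into the four xlsx files
% Assumption: X is samples-by-features, Y is samples-by-outputs
n   = size(X,1);
idx = randperm(n);              % random shuffle of the sample indices
nTr = round(0.8*n);             % 80% of the samples for training (illustrative choice)
writematrix(X(idx(1:nTr),:),     'input.xlsx');
writematrix(Y(idx(1:nTr),:),     'output.xlsx');
writematrix(X(idx(nTr+1:end),:), 'input_test.xlsx');
writematrix(Y(idx(nTr+1:end),:), 'output_test.xlsx');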

%% Clear the environment
clc
clear
% PSO-BP neural network prediction
% "single or multiple inputs and single or multiple outputs are both supported"
% Read the data

% Separate the training set from the test set, and the inputs from the outputs, beforehand.
% "Split the data into training and test sets yourself; name the xlsx files exactly as below."
input=readmatrix("input.xlsx");
input_test=readmatrix("input_test.xlsx");
output=readmatrix("output.xlsx");
output_test=readmatrix("output_test.xlsx");
% Note: each column is one feature (the number of columns is the feature dimension); each row is one sample
% Number of nodes
inputnum=size(input,2);   % number of network inputs (features)
hiddennum=6;              % number of hidden-layer neurons
outputnum=size(output,2); % number of network outputs

% Training data
[inputn,inputps]=mapminmax(input');    % normalize the input data
[outputn,outputps]=mapminmax(output'); % normalize the output data
% Test data
outputt1=output_test';  % test output data (transposed)
inputtn=mapminmax('apply',input_test',inputps); % normalize the test inputs with the training settings
% Prediction data
inn=mapminmax('apply',input_test',inputps); % inputs for the final prediction (here the test inputs are reused)
% Build the network
net=newff(inputn,outputn,hiddennum);
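% Note: newff is legacy syntax; in recent MATLAB releases, feedforwardnet(hiddennum)
% followed by configure(net,inputn,outputn) is the documented replacement.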
% Parameter initialization
% Two acceleration coefficients of the particle swarm algorithm
c1 = 1.5;
c2 = 1.5;

maxgen=30;    % number of generations (adjust as needed)
sizepop=30;   % swarm size (adjust as needed)
wmax=0.9;   % maximum inertia weight
wmin=0.4;   % minimum inertia weight

Vmax=1;     % velocity limits
Vmin=-1;
popmax=5;   % position (weight/threshold) limits
popmin=-5;
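% Dimension of each particle: one full set of network parameters
% (input-to-hidden weights + hidden thresholds + hidden-to-output weights + output thresholds)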
Dim=inputnum*hiddennum+hiddennum+hiddennum*outputnum+outputnum;

%% Generate the initial particles and velocities
for i=1:sizepop
    % randomly generate an individual
    pop(i,:)=5*rands(1,Dim);    % initial position, within [popmin, popmax]
    vov(i,:)=rands(1,Dim);      % initial velocity
    % compute the fitness
    fitness(i,:)=fun(pop(i,:),inputnum,hiddennum,outputnum,net,inputn,outputn,inputps,outputps);   % fitness of the individual
end

% Personal bests and the global best
[bestfitness,bestindex]=min(fitness);
zbest=pop(bestindex,:);     % global best position
gbest=pop;                  % personal best positions
fitnessgbest=fitness;       % personal best fitness values
fitnesszbest=bestfitness;   % global best fitness value

%% Iterative optimization
yy(maxgen)=0;  % preallocate the best-fitness history
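% In every generation each particle j is moved by the standard PSO update
% (rand supplies fresh uniform numbers in [0,1]):
%   vov(j,:) <- w*vov(j,:) + c1*rand*(gbest(j,:) - pop(j,:)) + c2*rand*(zbest - pop(j,:))
%   pop(j,:) <- pop(j,:) + 0.5*vov(j,:)
% where gbest stores each particle's personal best, zbest is the global best,
% and the inertia weight w decreases linearly from wmax to wmin.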
for i=1:maxgen
    % linearly decreasing inertia weight for this generation
    w=wmax-(wmax-wmin)*i/maxgen;
    % particle velocity and position update
    for j=1:sizepop
        % velocity update
        vov(j,:) = w*vov(j,:) + c1*rand*(gbest(j,:) - pop(j,:)) + c2*rand*(zbest - pop(j,:));
        vov(j,vov(j,:)>Vmax)=Vmax;
        vov(j,vov(j,:)<Vmin)=Vmin;
        
        % position update
        pop(j,:)=pop(j,:)+0.5*vov(j,:);
        pop(j,pop(j,:)>popmax)=popmax;
        pop(j,pop(j,:)<popmin)=popmin;
        
        % mutation operator: re-initialize one randomly chosen dimension
        if rand>0.9
            k=ceil(Dim*rand);
            pop(j,k)=rand;
        end
       
        % fitness of the updated particle
        fitness(j)=fun(pop(j,:),inputnum,hiddennum,outputnum,net,inputn,outputn,inputps,outputps);
    end
    % update the personal bests and the global best
    for j=1:sizepop
        % personal best update
        if fitness(j) < fitnessgbest(j)
            gbest(j,:) = pop(j,:);
            fitnessgbest(j) = fitness(j);
        end
        
        % global best update
        if fitness(j) < fitnesszbest
            zbest = pop(j,:);
            fitnesszbest = fitness(j);
        end
        
    end
    % record the best fitness value of this generation
    yy(i)=fitnesszbest;
        
end

%% Result analysis
figure(1);
plot(yy)
title(['Fitness curve,  final generation = ' num2str(maxgen)],'fontsize',12);
xlabel('Generation','fontsize',12);ylabel('Fitness','fontsize',12);

x=zbest;
%% Assign the optimal initial weights and thresholds to the network
% prediction with the BP network optimized by the particle swarm algorithm
% Decode the best particle: input-to-hidden weights w1, hidden thresholds B1,
% hidden-to-output weights w2, output thresholds B2
w1=x(1:inputnum*hiddennum);
B1=x(inputnum*hiddennum+1:inputnum*hiddennum+hiddennum);
w2=x(inputnum*hiddennum+hiddennum+1:inputnum*hiddennum+hiddennum+hiddennum*outputnum);
B2=x(inputnum*hiddennum+hiddennum+hiddennum*outputnum+1:inputnum*hiddennum+hiddennum+hiddennum*outputnum+outputnum);

net.iw{1,1}=reshape(w1,hiddennum,inputnum);
net.lw{2,1}=reshape(w2,outputnum,hiddennum);
net.b{1}=reshape(B1,hiddennum,1);
net.b{2}=B2';
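% The PSO result only provides the starting point: train() below refines these
% weights and thresholds further with gradient-based backpropagation.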

%% BP network training
% training parameters
net.trainParam.epochs=500;      % maximum number of training epochs
net.trainParam.lr=0.1;          % learning rate
net.trainParam.goal=0.0000001;  % performance goal

% train the network
[net,tr]=train(net,inputn,outputn);

%% BP network prediction
% prediction on the training data
an=sim(net,inputn);
anss=mapminmax('reverse',an,outputps);   % de-normalize the network outputs
error=output'-anss;
hold off
figure(2);
plot(error'),hold on
title('Prediction error (training)','fontsize',12);
xlabel('Sample index','fontsize',12);ylabel('Error','fontsize',12);

plot(output,'*');hold on
plot(anss','-o','linewidth',2,'MarkerSize',14,'MarkerEdgecolor',[138 151 123]/255);
%legend("error 1","error 2","actual 1","actual 2","prediction 1","prediction 2","actual value","prediction","Location","best")
title('Training-data prediction')
xlabel('potato'),ylabel('weight')
 set(gca, 'Box', 'off', 'TickDir', 'out', 'TickLength', [.02 .02], ...
    'XMinorTick', 'on', 'YMinorTick', 'on', 'YGrid', 'on', ...
    'XColor', [.3 .3 .3], 'YColor', [.3 .3 .3],'LineWidth', 1)

% prediction on the test data
an=sim(net,inputtn);
anss=mapminmax('reverse',an,outputps);   % de-normalize the network outputs
error=outputt1-anss;
hold off
figure(3);
plot(error'),hold on
title('Prediction error (test)','fontsize',12);
xlabel('Sample index','fontsize',12);ylabel('Error','fontsize',12);

plot(outputt1','*');hold on
plot(anss','-o','linewidth',2,'MarkerSize',14,'MarkerEdgecolor',[138 151 123]/255);
%legend("error 1","error 2","actual 1","actual 2","prediction 1","prediction 2","Location","best")
title('Test-data prediction')
xlabel('potato'),ylabel('weight')
 set(gca, 'Box', 'off', 'TickDir', 'out', 'TickLength', [.02 .02], ...
    'XMinorTick', 'on', 'YMinorTick', 'on', 'YGrid', 'on', ...
    'XColor', [.3 .3 .3], 'YColor', [.3 .3 .3],'LineWidth', 1)
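% Optional check (illustrative addition): summarize the test-set accuracy as a
% single number, using the variables already computed above.
rmse = sqrt(mean((outputt1(:) - anss(:)).^2));   % root-mean-square error over all outputs
fprintf('Test RMSE: %.4f\n', rmse);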

% prediction on new inputs (here inn is the normalized test input prepared above)
an=sim(net,inn);
anss=mapminmax('reverse',an,outputps)   % de-normalized predictions (no semicolon, so the values are displayed)

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Save the following as fun.m %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function error = fun(x,inputnum,hiddennum,outputnum,net,inputn,outputn,inputps,outputps)
% This function computes the fitness value of an individual
% x          input     individual (a vector of network weights and thresholds)
% inputnum   input     number of input-layer nodes
% hiddennum  input     number of hidden-layer nodes
% outputnum  input     number of output-layer nodes
% net        input     network
% inputn     input     normalized training input data
% outputn    input     normalized training output data
% inputps    input     input normalization settings
% outputps   input     output normalization settings

% error      output    fitness value of the individual

% Extract the initial BP network weights and thresholds from the individual x
w1=x(1:inputnum*hiddennum);
B1=x(inputnum*hiddennum+1:inputnum*hiddennum+hiddennum);
w2=x(inputnum*hiddennum+hiddennum+1:inputnum*hiddennum+hiddennum+hiddennum*outputnum);
B2=x(inputnum*hiddennum+hiddennum+hiddennum*outputnum+1:inputnum*hiddennum+hiddennum+hiddennum*outputnum+outputnum);
 
% Build the BP network and set its training parameters
net=newff(inputn,outputn,hiddennum);
net.trainParam.epochs=100;
net.trainParam.lr=0.1;
net.trainParam.goal=0.00001;
net.trainParam.show=100;
net.trainParam.showWindow=0;

% Assign the individual's weights and thresholds as the network's initial values
net.iw{1,1}=reshape(w1,hiddennum,inputnum);
net.lw{2,1}=reshape(w2,outputnum,hiddennum);
net.b{1}=reshape(B1,hiddennum,1);
net.b{2}=reshape(B2,outputnum,1);

% Train the BP network
net=train(net,inputn,outputn);

% Evaluate the trained network on the training data
an=sim(net,inputn);
output=mapminmax('reverse',outputn,outputps);   % de-normalized training targets
anss=mapminmax('reverse',an,outputps);          % de-normalized network outputs
error=sum(sum(abs(anss-output)));               % fitness: sum of absolute training errors
end
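If the trained model needs to be reused later, one possible extension is to save the network together with the normalization settings and reload them before predicting on new data. The snippet below is only a sketch: the file name psobp_model.mat and the variable newX (new samples, one row per sample, same column order as input.xlsx) are illustrative assumptions, not part of the original code.

% Optional: persist the trained model and the normalization settings
save('psobp_model.mat','net','inputps','outputps');
% ... later, in a new session ...
S = load('psobp_model.mat');
newXn = mapminmax('apply', newX', S.inputps);                  % normalize new inputs
pred  = mapminmax('reverse', sim(S.net, newXn), S.outputps);   % de-normalized predictions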

Original article: https://blog.csdn.net/m0_62526778/article/details/128993930