% NOGM(1,1) grey prediction model.
% Core idea: use particle swarm optimization (PSO) to optimize the model's
% initial condition, i.e. the weighting factor alpha and the initialization
% index beta.

% Load the time series and split it: the last `predictnum` points are held
% out as the test set, the rest form the training set.
% NOTE(review): hard-coded absolute path — adjust to your environment.
% Assumes a numeric CSV; presumably a single column — confirm against air.csv.
data = csvread('D:\matlab2016a\demo\data\air.csv');
num = size(data,1);
predictnum = 6;    % number of hold-out test samples
train = data(1:num-predictnum,:);
test = data((num-predictnum+1):end,:);

% PSO hyper-parameters
dim = 2;           % search dimensions: [alpha, beta]
sizepop = 20;      % swarm size
maxgen = 100;      % number of PSO iterations
lb = [0.01,1]';ub = [0.99,size(train,1)]';   % bounds for alpha and beta
c1 = 2;c2 = 2;     % cognitive / social acceleration coefficients
w_min = 0.1;w_max = 0.9;   % inertia weight range (linearly decreased)

% GM(1,1) setup: first-order accumulated generating operation (AGO) of the
% training series, then least-squares estimation of the development
% coefficient `a` and grey input `b`.
X = cumsum(train,1);               % AGO series (replaces O(n^2) partial-sum loop)
Y = train(2:end,:);                % response: original series from step 2 on
Z = (X(1:end-1,:)+X(2:end,:))/2;   % background values: consecutive AGO means
B = [-Z,ones(size(Z,1),1)];        % design matrix of the grey differential eqn
% Least squares via backslash — numerically preferable to inv(B'*B)*B'*Y.
ab = B\Y;
a = ab(1,:);
b = ab(2,:);
    
% Initialize the swarm: random positions uniformly inside [lb,ub], random
% velocities in [-1,1], then evaluate each particle's fitness = training RMSE
% of the grey model built from its (alpha, beta) candidate.
for i = 1:sizepop
    position(i,:) = lb' + rand(1,dim).*(ub' - lb');
    speed(i,:) = 2*rand(1,dim)-1;
    
    alpha = position(i,1);      % candidate weighting factor
    beta = round(position(i,2));    % candidate initial-condition index (integer)
    n = size(train,1);
    % Weighted combination of the AGO series: S = sum_j alpha^(n-j) * X(j,:)
    S = 0;
    for j = 1:n
        S = S + alpha.^(n-j)*X(j,:);
    end
    
    % Restored (inverse-AGO) prediction for time steps 2..num
    for j = 2:num
        predict(j-1,:) = (S-b/a)*(1-exp(a))*exp(-a*(j-beta));
    end  
    % In-sample part aligns with train(2:end,:); RMSE is the fitness.
    predict_train = predict(1:size(train,1)-1,:);
    rmse = sqrt(sum((predict_train-train(2:end,:)).^2)/length(predict_train));
    fitness(i,:) = rmse;
end

% Initialize global and personal bests from the starting swarm.
[bestfitness,bestindex] = min(fitness);
% BUG FIX: position(bestindex) linearly indexed a single element; the global
% best must be the whole row (1 x dim), as expected by zbest(1,2) and the
% velocity update.
zbest = position(bestindex,:);   % global best position
zbestfitness = bestfitness;      % global best fitness
gbest = position;                % personal best positions (one row per particle)
gbestfitness = fitness;          % personal best fitness values


% PSO main loop: standard velocity/position updates with a linearly
% decreasing inertia weight; fitness is the grey-model training RMSE.
convergence = zeros(1,maxgen);
for L = 1:maxgen
    % Inertia weight decreases linearly from w_max to w_min.
    w = w_max - (w_max - w_min)*L/maxgen;
    for i = 1:sizepop
        % Velocity update: inertia + cognitive (personal best) + social (global best).
        speed(i,:) = w*speed(i,:) + c1*rand*(gbest(i,:)-position(i,:)) + c2*rand*(zbest-position(i,:));
        % Clamp velocity to [-1, 1] (replaces find-based assignment).
        speed(i,:) = max(min(speed(i,:), 1), -1);
        
        % Position update with a 0.5 step factor, clamped to [lb, ub].
        position(i,:) = position(i,:) + 0.5*speed(i,:);
        position(i,:) = max(min(position(i,:), ub'), lb');
        
        % Fitness: rebuild the grey-model forecast with candidate (alpha, beta).
        alpha = position(i,1);
        beta = round(position(i,2));
        n = size(train,1);
        S = 0;
        for j = 1:n
            S = S + alpha.^(n-j)*X(j,:);
        end
        for j = 2:num
            predict(j-1,:) = (S-b/a)*(1-exp(a))*exp(-a*(j-beta));
        end
        predict_train = predict(1:size(train,1)-1,:);
        rmse = sqrt(sum((predict_train-train(2:end,:)).^2)/length(predict_train));
        fit = rmse;
        
        % Update personal best, then global best.
        if fit < gbestfitness(i)
            gbest(i,:) = position(i,:);
            gbestfitness(i) = fit;
        end
        if fit < zbestfitness
            zbest = position(i,:);
            zbestfitness = fit;
        end
    end
    convergence(L) = zbestfitness;   % convergence curve of the global best
end

% Rebuild the grey model with the PSO-optimal (alpha, beta) and generate
% predictions over the whole series (steps 2..num).
best_alpha = zbest(1,1);
best_beta = zbest(1,2);

n = size(train,1);
S = 0;
for j = 1:n
    S = S + best_alpha.^(n-j)*X(j,:);
end
for j = 2:num
    predict(j-1,:) = (S-b/a)*(1-exp(a))*exp(-a*(j-best_beta));
end  
% Split: first n-1 rows align with train(2:end,:), remaining rows with test.
predict_train = predict(1:size(train,1)-1,:);
predict_test = predict((size(train,1)):end,:);

% (removed scraped blog boilerplate: QR-code "follow the account" prompt)

% Report in-sample (train) and out-of-sample (test) RMSE.
train_rmse = sqrt(sum((predict_train-train(2:end,:)).^2)/length(predict_train));
test_rmse = sqrt(sum((predict_test-test).^2)/length(predict_test));
disp(['Train rmse = ',num2str(train_rmse),' Test rmse = ',num2str(test_rmse)])


    
    
    
    
    
 

% Source: reprinted from blog.csdn.net/qq_42394743/article/details/84529551