An Intuitive Explanation of the Fisher Linear Classifier, with MATLAB and Python Implementations

1. An intuitive explanation:

The problem: Take the iris dataset as an example. There are three species of flower, A, B, and C, and each sample is described by a 4-dimensional feature vector. Given a feature vector, we want to determine its class. Humans can classify by eye in one, two, or three dimensions, but not with 4-dimensional data like this.

The insight: Suppose there is a direction vector such that taking the inner product of a feature vector with it (i.e., projecting the feature vector onto that direction) yields a single number. If the projections of same-class feature vectors cluster together while the projections of different classes remain relatively spread apart, our goal is achieved.

The objective: This gives us a direction to work in, namely, find the particular projection direction that best meets this requirement.
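To make the projection idea concrete, here is a toy sketch (not from the original post): two made-up 2-D clusters are projected onto a hand-picked unit direction w, and the resulting scalars separate cleanly. All numbers here are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)
class1 = rng.normal([0.0, 0.0], 0.5, size=(20, 2))  # made-up cluster near the origin
class2 = rng.normal([3.0, 3.0], 0.5, size=(20, 2))  # made-up cluster near (3, 3)

w = np.array([1.0, 1.0])
w /= np.linalg.norm(w)       # hand-picked unit direction (Fisher's method finds the best one)

# The inner product with w is the 1-D projection of each sample.
p1, p2 = class1 @ w, class2 @ w
print(p1.min(), p1.max())    # class-1 projections stay small...
print(p2.min(), p2.max())    # ...class-2 projections are clearly larger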

Note: For the detailed derivation, see a standard textbook. Also, finding the extremum uses vector-derivative rules from matrix calculus.
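For reference, the standard result of that derivation (the textbook Fisher criterion, stated here for completeness) maximizes the ratio of between-class separation to within-class scatter along the projection direction $w$:

$$J(w)=\frac{\left(w^{\top}(\mu_1-\mu_2)\right)^{2}}{w^{\top}S_w\,w},\qquad \arg\max_w J(w)\;\propto\;S_w^{-1}(\mu_1-\mu_2),$$

where $\mu_1,\mu_2$ are the two class means and $S_w$ is the total within-class scatter matrix. The MATLAB code below computes exactly this direction, then thresholds the projection at the midpoint of the two projected means.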

2. MATLAB program:

clear

% Each row is one sample: [sepal length, sepal width, petal length, petal width].
% Class A: Iris setosa.
A=[5.1,3.5,1.4,0.2
4.9,3.0,1.4,0.2
4.7,3.2,1.3,0.2
4.6,3.1,1.5,0.2
5.0,3.6,1.4,0.2
5.4,3.9,1.7,0.4
4.6,3.4,1.4,0.3
5.0,3.4,1.5,0.2
4.4,2.9,1.4,0.2
4.9,3.1,1.5,0.1
5.4,3.7,1.5,0.2
4.8,3.4,1.6,0.2
4.8,3.0,1.4,0.1
4.3,3.0,1.1,0.1
5.8,4.0,1.2,0.2
5.7,4.4,1.5,0.4
5.4,3.9,1.3,0.4
5.1,3.5,1.4,0.3
5.7,3.8,1.7,0.3
5.1,3.8,1.5,0.3
5.4,3.4,1.7,0.2
5.2,4.1,1.5,0.1
5.5,4.2,1.4,0.2
4.9,3.1,1.5,0.1
5.0,3.2,1.2,0.2
5.5,3.5,1.3,0.2
4.9,3.1,1.5,0.1
4.4,3.0,1.3,0.2
5.1,3.4,1.5,0.2
5.0,3.5,1.3,0.3
4.5,2.3,1.3,0.3
4.4,3.2,1.3,0.2
5.0,3.5,1.6,0.6
5.1,3.8,1.9,0.4
4.8,3.0,1.4,0.3
5.1,3.8,1.6,0.2
4.6,3.2,1.4,0.2
5.3,3.7,1.5,0.2
5.0,3.3,1.4,0.2];

% Class B: Iris versicolor.
B=[7.0,3.2,4.7,1.4
6.4,3.2,4.5,1.5
6.9,3.1,4.9,1.5
5.5,2.3,4.0,1.3
6.5,2.8,4.6,1.5
5.7,2.8,4.5,1.3
6.3,3.3,4.7,1.6
4.9,2.4,3.3,1.0
6.6,2.9,4.6,1.3
5.2,2.7,3.9,1.4
5.0,2.0,3.5,1.0
5.9,3.0,4.2,1.5
6.0,2.2,4.0,1.0
6.1,2.9,4.7,1.4
5.6,2.9,3.6,1.3
6.7,3.1,4.4,1.4
5.6,3.0,4.5,1.5
5.8,2.7,4.1,1.0
6.2,2.2,4.5,1.5
5.6,2.5,3.9,1.1
5.9,3.2,4.8,1.8
6.1,2.8,4.0,1.3
6.3,2.5,4.9,1.5
6.1,2.8,4.7,1.2
6.4,2.9,4.3,1.3
6.6,3.0,4.4,1.4
6.8,2.8,4.8,1.4
6.7,3.0,5.0,1.7
6.0,2.9,4.5,1.5
5.7,2.6,3.5,1.0
5.5,2.4,3.8,1.1
5.5,2.4,3.7,1.0
5.8,2.7,3.9,1.2
6.0,2.7,5.1,1.6
5.4,3.0,4.5,1.5
6.0,3.4,4.5,1.6
6.7,3.1,4.7,1.5
6.3,2.3,4.4,1.3
5.6,3.0,4.1,1.3
5.5,2.5,4.0,1.3
5.5,2.6,4.4,1.2
6.1,3.0,4.6,1.4
5.8,2.6,4.0,1.2
5.0,2.3,3.3,1.0
5.6,2.7,4.2,1.3
5.7,3.0,4.2,1.2
5.7,2.9,4.2,1.3
6.2,2.9,4.3,1.3
5.1,2.5,3.0,1.1
5.7,2.8,4.1,1.3];

% Class C: Iris virginica.
C=[6.3,3.3,6.0,2.5
5.8,2.7,5.1,1.9
7.1,3.0,5.9,2.1
6.3,2.9,5.6,1.8
6.5,3.0,5.8,2.2
7.6,3.0,6.6,2.1
4.9,2.5,4.5,1.7
7.3,2.9,6.3,1.8
6.7,2.5,5.8,1.8
7.2,3.6,6.1,2.5
6.5,3.2,5.1,2.0
6.4,2.7,5.3,1.9
6.8,3.0,5.5,2.1
5.7,2.5,5.0,2.0
5.8,2.8,5.1,2.4
6.4,3.2,5.3,2.3
6.5,3.0,5.5,1.8
7.7,3.8,6.7,2.2
7.7,2.6,6.9,2.3
6.0,2.2,5.0,1.5
6.9,3.2,5.7,2.3
5.6,2.8,4.9,2.0
7.7,2.8,6.7,2.0
6.3,3.4,5.6,2.4
6.4,3.1,5.5,1.8
6.0,3.0,4.8,1.8
6.9,3.1,5.4,2.1
6.7,3.1,5.6,2.4
6.9,3.1,5.1,2.3
5.8,2.7,5.1,1.9
6.8,3.2,5.9,2.3
6.7,3.3,5.7,2.5
6.7,3.0,5.2,2.3
6.3,2.5,5.0,1.9
6.5,3.0,5.2,2.0
6.2,3.4,5.4,2.3
5.9,3.0,5.1,1.8];

% Method 1: treat A as one class and B and C together as the other class

NA=size(A,1);NB=size(B,1);NC=size(C,1);
A_train=A(1:floor(NA/2),:);   % first half of each class for training (could also use 1/3, 3/4, 1/4, ...)
B_train=B(1:floor(NB/2),:);
C_train=C(1:floor(NC/2),:);
A_test=A((floor(NA/2)+1):end,:);
B_test=B((floor(NB/2)+1):end,:);
C_test=C((floor(NC/2)+1):end,:);
D_train=[B_train;C_train];    % merge B and C into a single "not A" class
D_test=[B_test;C_test];
u1=mean(A_train);u2=mean(D_train);   % class mean vectors (1x4)
S1=zeros(4);S2=zeros(4);
for i=1:size(A_train,1)
    S1=S1+(A_train(i,:)-u1)'*(A_train(i,:)-u1);   % within-class scatter of A
end
for i=1:size(D_train,1)
    S2=S2+(D_train(i,:)-u2)'*(D_train(i,:)-u2);   % within-class scatter of D
end
Sw=S1+S2;                     % total within-class scatter matrix
w1=(Sw\(u1-u2)')';            % Fisher direction: w = Sw^-1*(u1-u2)
w1=w1./norm(w1);              % normalize (scaling w does not change the decision)
y0=w1*(u1+u2)'/2;             % threshold: midpoint of the two projected class means
% w1*u1', w1*u2'              % projected class means, for inspection
r1=0;
for i=1:size(D_test,1)
    if w1*D_test(i,:)'<y0     % D samples should project below the threshold
        r1=r1+1;
    end
end
rate_D=r1/size(D_test,1)      % accuracy on the D (B and C) test samples
r2=0;
for i=1:size(A_test,1)
    if w1*A_test(i,:)'>y0     % A samples should project above the threshold
        r2=r2+1;
    end
end
rate_A=r2/size(A_test,1)      % accuracy on the A test samples
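For readers who want the same two-class computation (A versus B and C merged) in Python, here is a minimal NumPy sketch; the array names X_a and X_d are assumptions standing in for A and [B;C] above:

import numpy as np

def fisher_two_class(X_a, X_d):
    # Class means.
    u1, u2 = X_a.mean(axis=0), X_d.mean(axis=0)
    # Within-class scatter matrices: sums of outer products of centered samples.
    S1 = (X_a - u1).T @ (X_a - u1)
    S2 = (X_d - u2).T @ (X_d - u2)
    Sw = S1 + S2
    w = np.linalg.solve(Sw, u1 - u2)   # Fisher direction: Sw^-1 (u1 - u2)
    w /= np.linalg.norm(w)
    y0 = w @ (u1 + u2) / 2             # midpoint threshold, as in the MATLAB code
    return w, y0

# A sample x is then assigned to class A when w @ x > y0, and to B/C otherwise.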

3. Python program:

from sklearn import discriminant_analysis
from sklearn.model_selection import train_test_split
import numpy

# Load the four feature columns and the species-name column from iris.csv.
data = numpy.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
target = numpy.genfromtxt('iris.csv', delimiter=',', usecols=(4,), dtype=str)

# Encode the string labels as numeric class indices.
t = numpy.zeros(len(target))
t[target == 'setosa'] = 1
t[target == 'versicolor'] = 2
t[target == 'virginica'] = 3

# Fit LDA on a 50/50 train/test split and print the test accuracy.
clf = discriminant_analysis.LinearDiscriminantAnalysis()
train, test, t_train, t_test = train_test_split(data, t, test_size=0.5, random_state=0)
clf.fit(train, t_train)
print(clf.score(test, t_test))
# print(clf.predict([data[3]]))   # predict the class of a single sample
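As an optional follow-up (not in the original post), the fitted sklearn model can also project samples onto the learned discriminant axes, the multiclass counterpart of the scalar projection w1*x' in the MATLAB code:

# Continuing from the script above:
X_proj = clf.transform(test)   # shape (n_samples, 2): three classes give at most two axes
print(X_proj[:5])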

Reposted from www.cnblogs.com/tzenthin/p/9726683.html