《深度学习Python实践》 (Deep Learning with Python in Practice), Chapter 12: Reviewing Classification Algorithms

This chapter spot-checks six classification algorithms on the Pima Indians Diabetes dataset, estimating each model's accuracy with 10-fold cross-validation.

1. Logistic Regression

Logistic regression models the probability of the positive class with a logistic (sigmoid) function applied to a linear combination of the inputs, and is a common baseline for binary classification.

from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression

# Load the Pima Indians Diabetes dataset
filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]   # the eight input features
Y = array[:, 8]     # the binary class label

# Newer scikit-learn versions require shuffle=True whenever random_state is set,
# so exact scores may differ slightly from the outputs shown below.
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = LogisticRegression(solver='liblinear')  # liblinear (the historical default) avoids convergence warnings on this unscaled data
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.7695146958304853
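
The mean accuracy alone hides how much the score varies across folds. A minimal follow-up, assuming `result` from the listing above, also reports the standard deviation:

# Report the spread of the 10 fold scores as well as the mean
print("Accuracy: %.3f (+/- %.3f)" % (result.mean(), result.std()))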

2. Linear Discriminant Analysis (LDA)

Linear discriminant analysis is a linear classifier that assumes the numeric inputs of each class follow a Gaussian distribution.

## LDA
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)  # shuffle=True required with random_state
model = LinearDiscriminantAnalysis()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.773462064251538
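
Beyond classification, LinearDiscriminantAnalysis can also act as a supervised dimensionality-reduction transform. A small sketch, reusing X and Y from the listing above, projects the eight inputs onto the single discriminant axis available for a two-class problem:

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

# At most n_classes - 1 = 1 component for a binary problem
lda = LinearDiscriminantAnalysis(n_components=1)
X_lda = lda.fit_transform(X, Y)  # X, Y from the listing above
print(X_lda.shape)               # (n_samples, 1)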

3. K-Nearest Neighbors (KNN)

K-nearest neighbors classifies a new sample by a majority vote among its k closest training samples (k = 5 by default in scikit-learn).

## K-Nearest Neighbors
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = KNeighborsClassifier()  # k = 5 neighbours by default
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.7265550239234451
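
The default of five neighbours is rarely the best choice. A sketch, reusing X, Y and kfold from the listing above, searches a small grid of neighbour counts with GridSearchCV:

from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

# Try a range of odd neighbour counts with the same 10-fold split
param_grid = {'n_neighbors': [3, 5, 7, 9, 11, 13, 15]}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=kfold)
grid.fit(X, Y)  # X, Y, kfold from the listing above
print(grid.best_params_, grid.best_score_)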

4. Naive Bayes

Gaussian naive Bayes assumes the features are conditionally independent given the class and models each feature with a Gaussian distribution.

## Naive Bayes
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB

filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = GaussianNB()
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.7551777170198223
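
GaussianNB also exposes the per-class probabilities behind its predictions. A minimal sketch, reusing X and Y from the listing above and fitting on the full data purely to illustrate the probability output:

from sklearn.naive_bayes import GaussianNB

model = GaussianNB()
model.fit(X, Y)  # X, Y from the listing above
# One row per sample: P(class 0), P(class 1)
print(model.predict_proba(X[:5]))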

5. Classification and Regression Trees (CART)

CART builds a binary decision tree by repeatedly choosing the feature and split point that best separate the classes.

## Classification and Regression Trees (CART)
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = DecisionTreeClassifier()  # tree construction breaks ties randomly, so this score varies between runs
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.6860902255639098
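
A deep, unpruned tree tends to overfit, and because tie-breaking during tree construction is random, the score above also varies from run to run. A sketch of a shallower, seeded tree (max_depth=3 is an arbitrary illustrative value), reusing X, Y, kfold and seed from the listing above:

from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

# Limit depth to reduce overfitting and seed the tree for repeatable scores
model = DecisionTreeClassifier(max_depth=3, random_state=seed)
result = cross_val_score(model, X, Y, cv=kfold)  # X, Y, kfold, seed from above
print(result.mean())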

6. Support Vector Machines (SVM)

A support vector machine looks for the decision boundary that maximises the margin between the two classes; scikit-learn's SVC uses an RBF kernel by default.

## SVM
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

filename = '/home/duan/文档/pima indians.txt'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
array = data.values
X = array[:, 0:8]
Y = array[:, 8]
num_folds = 10
seed = 7
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
model = SVC()  # RBF kernel by default
result = cross_val_score(model, X, Y, cv=kfold)
print(result.mean())

Output:

0.6510252904989747
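
The RBF kernel used by SVC is sensitive to the scale of the inputs, and the Pima features have very different ranges. A sketch, reusing X, Y and kfold from the listing above, standardises the features inside a Pipeline before the SVM:

from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# Standardise each feature to zero mean and unit variance before the RBF SVM
model = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
result = cross_val_score(model, X, Y, cv=kfold)  # X, Y, kfold from the listing above
print(result.mean())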

Reposted from blog.csdn.net/zhenaoxi1077/article/details/80651810