knn_classification_model(Python)

import numpy as np
import pandas as pd
from sklearn import datasets
# Data preprocessing
from sklearn import preprocessing
# Train/test split and grid search over hyperparameters
from sklearn.model_selection import train_test_split,GridSearchCV
# Feature selection
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
# KNN classification model
from sklearn.neighbors import KNeighborsClassifier
# Model evaluation (classification report / confusion matrix)
from sklearn.metrics import classification_report,confusion_matrix

dataset = datasets.load_iris()
features,labels= dataset.feature_names,list(dataset.target_names)
X,y = dataset.data,dataset.target

scaler = preprocessing.StandardScaler()
x = scaler.fit_transform(X)

# Tree-based feature selection
clf = ExtraTreesClassifier(n_estimators=10,criterion='gini',random_state=1)
clf.fit(x,y)
print('Tree-based feature importance scores:',clf.feature_importances_)
model = SelectFromModel(clf,prefit=True)
result = model.transform(x)
index = []
for col in range(x.shape[1]):
    if x[0,col] in list(result[0]):
        index.append(col)
print('Tree-based feature selection result (column indices):',index)
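# Note: instead of recovering the kept columns by matching values in the first
# sample as above, the fitted SelectFromModel reports them directly through
# get_support(); the same applies to the two selectors below. A small
# illustrative addition, not part of the original post:
selected = model.get_support(indices=True)
print('Selected feature indices via get_support:',list(selected))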

# Feature selection via L1-regularized logistic regression (smaller C keeps fewer features)
clf = LogisticRegression(penalty='l1',solver='liblinear',dual=False,C=0.1,random_state=1)
clf.fit(x,y)
print('Logistic regression feature coefficients (class 0):',clf.coef_[0])
model = SelectFromModel(clf,prefit=True)
result = model.transform(x)
index = []
for col in range(x.shape[1]):
    if x[0,col] in list(result[0]):
        index.append(col)
print('Logistic-regression-based feature selection result (column indices):',index)

# Feature selection via L1-regularized LinearSVC (smaller C keeps fewer features)
clf = LinearSVC(penalty='l1',dual=False,C=0.1,random_state=1)
clf.fit(x,y)
print('LinearSVC feature coefficients (class 0):',clf.coef_[0])
model = SelectFromModel(clf,prefit=True)
result = model.transform(x)
index = []
for col in range(x.shape[1]):
    if x[0,col] in list(result[0]):
        index.append(col)
print('LinearSVC-based feature selection result (column indices):',index)
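# The remark "smaller C keeps fewer features" can be checked by sweeping C
# (an illustrative sketch, not part of the original post; with a very small C
# the selector may keep no features at all):
for c in (0.01,0.1,1.0):
    svc = LinearSVC(penalty='l1',dual=False,C=c,random_state=1).fit(x,y)
    n_kept = SelectFromModel(svc,prefit=True).transform(x).shape[1]
    print('C=%.2f -> %d features selected' % (c,n_kept))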

x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=1)

# Search for the best hyperparameter (number of neighbors)
params = {'n_neighbors':range(2,50)}
model = GridSearchCV(KNeighborsClassifier(),params)
model.fit(x_train,y_train)
print('Best model:\n',model.best_estimator_)
print('Best parameters:\n',model.best_params_)
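# The fitted GridSearchCV object also exposes the best cross-validated accuracy,
# and its refit best_estimator_ could be used directly instead of constructing a
# new classifier by hand (illustrative addition):
print('Best cross-validation accuracy:',model.best_score_)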

clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(x_train,y_train)
print('---------------- Training set ----------------')
train_predict = clf.predict(x_train)
print(classification_report(y_train,train_predict,target_names=labels))
print(pd.DataFrame(confusion_matrix(y_train,train_predict)))

print('---------------- Test set ----------------')
test_predict = clf.predict(x_test)
print(classification_report(y_test,test_predict,target_names=labels))
print(pd.DataFrame(confusion_matrix(y_test,test_predict)))

'''
Per-class metrics computed by hand from the training-set confusion matrix above:
precision = (0) 36/36 = 1 | (1) 30/(30+2) = 0.94 | (2) 35/(35+2) = 0.95
recall    = (0) 36/36 = 1 | (1) 30/(30+2) = 0.94 | (2) 35/(35+2) = 0.95
f1-score  = (0) 36x2/(36x2) = 1 | (1) 30x2/(30x2+2+2) = 0.94 | (2) 35x2/(35x2+2+2) = 0.95
'''
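# The hand-computed numbers above can be reproduced with sklearn directly
# (a minimal verification sketch; exact values depend on the split and on k):
from sklearn.metrics import precision_recall_fscore_support
p,r,f,support = precision_recall_fscore_support(y_train,train_predict)
print('precision:',np.round(p,2))
print('recall:',np.round(r,2))
print('f1-score:',np.round(f,2))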


Reprinted from blog.csdn.net/qinlan1994/article/details/82289798