Python Data Analysis and Mining in Practice: Extended Exercise on Water Quality Evaluation Based on Water Color Images


import pandas as pd
fileTest = 'chapter9/test.xls'
dataT = pd.read_excel(fileTest)  # read the data (newer pandas versions do not accept an encoding argument here)

# Map the grades I through VII to the numbers 1 through 7
dataT.loc[dataT['空气等级']=='I', '空气等级'] = 1
dataT.loc[dataT['空气等级']=='II', '空气等级'] = 2
dataT.loc[dataT['空气等级']=='III', '空气等级'] = 3
dataT.loc[dataT['空气等级']=='IV', '空气等级'] = 4
dataT.loc[dataT['空气等级']=='V', '空气等级'] = 5
dataT.loc[dataT['空气等级']=='VI', '空气等级'] = 6
dataT.loc[dataT['空气等级']=='VII', '空气等级'] = 7
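
The seven assignments above can be collapsed into a single dictionary-based lookup; an equivalent sketch:

# Equivalent: map the Roman-numeral grades to integers in one step
grade_map = {'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5, 'VI': 6, 'VII': 7}
dataT['空气等级'] = dataT['空气等级'].map(grade_map)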

dataT = dataT.values  # as_matrix() was removed from pandas; .values returns the same NumPy array

# Split into training and test sets (columns 0-5 are the features, column 6 is the label)
from sklearn.model_selection import train_test_split
train, test, train_target, test_target = train_test_split(dataT[:, :6], dataT[:, 6], test_size=0.2)
train_target=train_target.astype(int)
test_target=test_target.astype(int)
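
Since train_test_split shuffles randomly, the scores quoted below will drift between runs; fixing random_state makes them reproducible. A minimal sketch (the seed value 42 is an arbitrary choice, not from the original post):

# Same split, but reproducible across runs
train, test, train_target, test_target = train_test_split(
    dataT[:, :6], dataT[:, 6], test_size=0.2, random_state=42)
train_target = train_target.astype(int)
test_target = test_target.astype(int)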

First, train a model with SVC.
# SVC training
from sklearn import svm
model = svm.SVC()
model.fit(train, train_target)
model.score(test, test_target)  # test-set score: only about 54%

# The raw feature values are all on the order of 0.00x; they are too small to discriminate well, so multiply them by 100
from sklearn import svm
model = svm.SVC()
model.fit(train*100, train_target)
model.score(test*100, test_target)  # test accuracy reaches 86%, but the training score hits 99%: overfitting

# Re-scale by 30 instead
# SVC training
from sklearn import svm
model = svm.SVC()
model.fit(train*30, train_target)
model.score(test*30, test_target)
model.score(train*30, train_target)
# Both the test and training scores are around 92%, which is a good result.
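
The hand-tuned multipliers (*100, *30) are really a crude substitute for feature scaling, which matters a lot for the RBF kernel. A more systematic alternative, not in the original post, is to standardize the features in a pipeline; a minimal sketch:

# Sketch: standardize features instead of hand-tuning a scale factor
from sklearn import svm
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
pipe = make_pipeline(StandardScaler(), svm.SVC())
pipe.fit(train, train_target)
print(pipe.score(test, test_target))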



# Try the same data with a decision tree:
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
clf = DecisionTreeClassifier(max_depth=8)  # max_depth caps the maximum depth of the tree

# Cross-validate to evaluate the classifier. With no scoring argument,
# cross_val_score uses the classifier's default metric, which is accuracy (not ROC AUC).
import numpy as np
scores = cross_val_score(clf, train*30, train_target, cv=3)
print("Accuracy Decision Tree: ", np.mean(scores), np.std(scores))
# np.mean(scores) is 0.96067821067821069, np.std(scores) is 0.031432930607203445
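
The original post labelled these scores "ROC AUC", but they are plain accuracy; to actually cross-validate with AUC on this multi-class problem the metric has to be requested explicitly. A minimal sketch (assuming scikit-learn >= 0.22 for the 'roc_auc_ovr' scorer, and that every grade appears in each fold):

# Explicitly request one-vs-rest ROC AUC instead of the default accuracy
auc_scores = cross_val_score(clf, train*30, train_target, cv=3, scoring='roc_auc_ovr')
print("ROC AUC Decision Tree: ", np.mean(auc_scores), np.std(auc_scores))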

import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, X, y, ylim=(0, 1.1), cv=3,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5),
                        scoring=None):
    plt.title("Learning curves for %s" % type(estimator).__name__)
    plt.ylim(*ylim); plt.grid()
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, validation_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,
        scoring=scoring)
    train_scores_mean = np.mean(train_scores, axis=1)
    validation_scores_mean = np.mean(validation_scores, axis=1)

    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, validation_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return validation_scores_mean[-1]

clf = DecisionTreeClassifier(max_depth=8)
mean8 = plot_learning_curve(clf, train, train_target)  # final cross-validation score: 0.95310245310245312

[Figure: learning curves for DecisionTreeClassifier]

# Use validation_curve to compute training and cross-validation scores at different tree depths
from sklearn.model_selection import validation_curve
def plot_validation_curve(estimator, X, y, param_name, param_range,
                          ylim=(0, 1.1), cv=3, n_jobs=1, scoring=None):
    estimator_name = type(estimator).__name__
    plt.title("Validation curves for %s on %s"
              % (param_name, estimator_name))
    plt.ylim(*ylim); plt.grid()
    plt.xlim(min(param_range), max(param_range))
    plt.xlabel(param_name)
    plt.ylabel("Score")

    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range,
        cv=cv, n_jobs=n_jobs, scoring=scoring)

    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    plt.semilogx(param_range, train_scores_mean, 'o-', color="r",
                 label="Training score")
    plt.semilogx(param_range, test_scores_mean, 'o-', color="g",
                 label="Cross-validation score")
    plt.legend(loc="best")
    print("Best test score: {:.4f}".format(test_scores_mean[-1]))

clf = DecisionTreeClassifier(max_depth=8)
param_name = 'max_depth'
param_range = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]

plot_validation_curve(clf, train*30, train_target,
                      param_name, param_range)
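
Instead of eyeballing the best depth from the curve, GridSearchCV can search the same range with the same 3-fold cross-validation; a minimal sketch:

# Sketch: pick max_depth automatically over the same parameter range
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(DecisionTreeClassifier(), {'max_depth': param_range}, cv=3)
grid.fit(train*30, train_target)
print(grid.best_params_, grid.best_score_)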

[Figure: validation curves for max_depth on DecisionTreeClassifier]

The analysis above shows that the decision tree performs better than the SVM on this data.

If a single decision tree does not perform well enough, a random forest can be used to boost classification performance; a sketch follows below.
As the figure above shows, the decision tree already works quite well, and training on train*30 versus the unscaled train makes little difference.
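
For completeness, a random forest is a near drop-in replacement for the single tree; a minimal sketch (n_estimators=100 and the fixed seed are arbitrary choices, not from the original post):

# Sketch: random forest as a drop-in replacement for the decision tree
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(train, train_target)
print(rf.score(test, test_target))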

Reference: https://blog.csdn.net/sun_shengyun/article/details/52605611

Reposted from https://blog.csdn.net/qq_26645205/article/details/80420038