Python Machine Learning and Practice - Chapter 2

1. Supervised Learning Code

import pandas as pd
import numpy as np
column_names=['Sample code number','Clump Thickness','Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion','Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']
#read_csv can also read data directly from a URL
data=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data',names=column_names)
#NaN marks missing data; it is similar to None but with some differences
data=data.replace(to_replace='?',value=np.nan)
#DataFrame.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)
#axis:
#axis=0: drop rows that contain missing values
#axis=1: drop columns that contain missing values
#how: used together with axis
#how='any': drop the row or column if it contains any missing value
#how='all': drop the row or column only if all of its values are missing
#thresh: keep only rows/columns with at least thresh non-missing values
#e.g. axis=0, thresh=10: a row is dropped if it has fewer than 10 non-missing values
#subset: list
#which columns to check for missing values
#inplace: whether to operate on the original data; if True, returns None, otherwise returns a new copy with the missing values removed
data=data.dropna(how='any')
data.shape
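
To make the dropna parameters above concrete, here is a minimal sketch on a toy DataFrame (the frame demo and its values are invented for illustration):

demo=pd.DataFrame({'a':[1,np.nan,3],'b':[np.nan,np.nan,6],'c':[7,8,9]})
demo.dropna(how='any')         #drops rows 0 and 1, which each contain a NaN
demo.dropna(how='all')         #drops nothing: no row is entirely NaN
demo.dropna(axis=0,thresh=2)   #keeps rows with at least 2 non-missing values (rows 0 and 2)
demo.dropna(subset=['a'])      #only checks column 'a', so only row 1 is dropped
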
#Logistic regression and gradient-descent classification
#train_test_split splits the data into training and test sets
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(train_data, train_target, test_size=0.4, random_state=0)
X_train,X_test, y_train, y_test = train_test_split(data[column_names[1:10]],data[column_names[10]],test_size=0.25, random_state=33)
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
#Standardize the data (similar in spirit to normalization)
ss=StandardScaler()
X_train=ss.fit_transform(X_train)
X_test=ss.transform(X_test)   #transform only: reuse the mean/variance fit on the training data
lr=LogisticRegression()
sgdc=SGDClassifier()
lr.fit(X_train,y_train)
lr_y_predict=lr.predict(X_test)
sgdc.fit(X_train,y_train)
sgdc_y_predict=sgdc.predict(X_test)
from sklearn.metrics import classification_report
print('Accuracy of LR Classifier:',lr.score(X_test,y_test))
#classification_report prints precision, recall, F1-score, etc.
print(classification_report(y_test,lr_y_predict,target_names=['Benign','Malignant']))
print('Accuracy of SGD Classifier:',sgdc.score(X_test,y_test))
#classification_report prints precision, recall, F1-score, etc.
print(classification_report(y_test,sgdc_y_predict,target_names=['Benign','Malignant']))
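
classification_report is a convenience wrapper; as a sketch, the per-class numbers can be reproduced by hand from the confusion matrix of the logistic-regression predictions, treating the malignant class (label 4 in this dataset) as positive:

from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,lr_y_predict)   #rows = true class (2,4), columns = predicted class (2,4)
tn,fp,fn,tp=cm.ravel()
precision=tp/(tp+fp)   #of the samples predicted malignant, the fraction that truly are
recall=tp/(tp+fn)      #of the truly malignant samples, the fraction that were found
print(precision,recall,2*precision*recall/(precision+recall))   #matches the 'Malignant' row above
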
#Support vector machine (classification)
#Load the handwritten digits dataset
#type gets an object's type; dtype gets an array's element type; astype() converts the element type
from sklearn.datasets import load_digits
digits=load_digits()
digits.data.shape
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(digits.data,digits.target,test_size=0.25,random_state=33)
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
ss=StandardScaler()
X_train=ss.fit_transform(X_train)
X_test=ss.transform(X_test)   #transform only, using the training-set statistics
lsvc=LinearSVC()
lsvc.fit(X_train,y_train)
y_predict=lsvc.predict(X_test)
print('Accuracy of LinearSVC:',lsvc.score(X_test,y_test))
print(classification_report(y_test,y_predict,target_names=digits.target_names.astype(str)))
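
A side note on the digits data used above: each sample is an 8x8 grayscale image flattened into a 64-dimensional row, and load_digits keeps the unflattened images too, so the relationship is easy to verify:

import numpy as np
print(digits.images.shape)   #(1797, 8, 8): the original 8x8 images
print(digits.data.shape)     #(1797, 64): the same pixels, flattened
print(np.array_equal(digits.images[0].reshape(64),digits.data[0]))   #True
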
#Naive Bayes classification
from sklearn.datasets import fetch_20newsgroups
news=fetch_20newsgroups()
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(news.data,news.target,test_size=0.25,random_state=33)
#Convert the text into feature vectors
from sklearn.feature_extraction.text import CountVectorizer
vec=CountVectorizer()
X_train=vec.fit_transform(X_train)
X_test=vec.transform(X_test)
from sklearn.naive_bayes import MultinomialNB
mnb=MultinomialNB()
mnb.fit(X_train,y_train)
y_predict=mnb.predict(X_test)
from sklearn.metrics import classification_report
print('Accuracy of MultinomialNB:',mnb.score(X_test,y_test))
print(classification_report(y_test,y_predict,target_names=news.target_names))
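
As a minimal sketch of what CountVectorizer did above (the two toy sentences are invented for illustration):

from sklearn.feature_extraction.text import CountVectorizer
toy_vec=CountVectorizer()
toy_X=toy_vec.fit_transform(['the cat sat','the cat and the dog'])
print(sorted(toy_vec.vocabulary_))   #learned vocabulary: ['and', 'cat', 'dog', 'sat', 'the']
print(toy_X.toarray())               #per-document word counts over that vocabulary
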
#k-nearest neighbors
from sklearn.datasets import load_iris
iris=load_iris()
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(iris.data,iris.target,test_size=0.25,random_state=33)
from sklearn.preprocessing import StandardScaler
ss=StandardScaler()
X_train=ss.fit_transform(X_train)
X_test=ss.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knc=KNeighborsClassifier()
knc.fit(X_train,y_train)
y_predict=knc.predict(X_test)
from sklearn.metrics import classification_report
print('Accuracy of KNeighborsClassifier:',knc.score(X_test,y_test))
print(classification_report(y_test,y_predict,target_names=iris.target_names))
#Decision tree
import pandas as pd
titanic=pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')


# head() shows the first 5 rows by default and head(num) shows the first num rows; tail() does the same for the last rows
titanic.head()
titanic.info()
X=titanic[['pclass','age','sex']]
y=titanic['survived']
X.info()
X['age'].fillna(X['age'].mean(),inplace=True)   #fill missing ages with the mean age
X.info()
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=33)
#DictVectorizer turns non-numeric features into numbers and leaves numeric ones unchanged; e.g. sex is just male or female, which gets encoded as 0/1 indicator columns
from sklearn.feature_extraction import DictVectorizer
dv=DictVectorizer(sparse=False)
X_train=dv.fit_transform(X_train.to_dict(orient='records'))
X_test=dv.transform(X_test.to_dict(orient='records'))   #transform only, reusing the fitted vocabulary
from sklearn.tree import DecisionTreeClassifier
dtc=DecisionTreeClassifier()
dtc.fit(X_train,y_train)
y_predict=dtc.predict(X_test)
print('Accuracy of Decision Tree:',dtc.score(X_test,y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_predict,target_names=['died','survived']))
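
As a sketch of what DictVectorizer did to the features above (the toy rows are invented for illustration): categorical values such as sex and pclass become one-hot indicator columns, while numeric values like age pass through unchanged.

from sklearn.feature_extraction import DictVectorizer
toy_dv=DictVectorizer(sparse=False)
rows=[{'age':29.0,'sex':'male','pclass':'1st'},{'age':2.0,'sex':'female','pclass':'3rd'}]
print(toy_dv.fit_transform(rows))   #[[29. 1. 0. 0. 1.], [2. 0. 1. 1. 0.]]
print(toy_dv.feature_names_)        #['age', 'pclass=1st', 'pclass=3rd', 'sex=female', 'sex=male']
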
#Decision tree vs. random forest vs. gradient boosting tree
import pandas as pd
titanic=pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
# head() shows the first 5 rows by default and head(num) shows the first num rows; tail() does the same for the last rows
titanic.head()
titanic.info()
X=titanic[['pclass','age','sex']]
y=titanic['survived']
X.info()
X['age'].fillna(X['age'].mean(),inplace=True)   #fill missing ages with the mean age
X.info()
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=33)
#DictVectorizer turns non-numeric features into numbers and leaves numeric ones unchanged; e.g. sex is just male or female, which gets encoded as 0/1 indicator columns
from sklearn.feature_extraction import DictVectorizer
dv=DictVectorizer(sparse=False)
X_train=dv.fit_transform(X_train.to_dict(orient='records'))
X_test=dv.transform(X_test.to_dict(orient='records'))   #transform only, reusing the fitted vocabulary
#A single decision tree
from sklearn.tree import DecisionTreeClassifier
dtc=DecisionTreeClassifier()
dtc.fit(X_train,y_train)
y_predict=dtc.predict(X_test)
print('Accuracy of Decision Tree:',dtc.score(X_test,y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_predict,target_names=['died','survived']))
#Random forest
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier()
rfc.fit(X_train,y_train)
y_predict=rfc.predict(X_test)
print('Accuracy of Random Forest:',rfc.score(X_test,y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_predict,target_names=['died','survived']))
#Gradient boosting decision tree
from sklearn.ensemble import GradientBoostingClassifier
gbc=GradientBoostingClassifier()
gbc.fit(X_train,y_train)
y_predict=gbc.predict(X_test)
print('Accuracy of Gradient Boosting:',gbc.score(X_test,y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_predict,target_names=['died','survived']))
#Regression prediction
from sklearn.datasets import load_boston
boston=load_boston()
#.DESCR shows the dataset description
from sklearn.model_selection import train_test_split
import numpy as np
X=boston.data
y=boston.target
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=33,test_size=0.25)
#The max, min, and median values differ widely, so the data needs standardizing
from sklearn.preprocessing import StandardScaler

#Standardize features by removing the mean and scaling to unit variance
#fit_transform and transform give the same values here, but they are not interchangeable: the API requires fitting (on the training data) before transform can be called
#In newer library versions everything must be a 2D array, even a single column or row, so y is reshaped with reshape(-1,1)
ss_X=StandardScaler()
ss_y=StandardScaler()
X_train=ss_X.fit_transform(X_train)
X_test=ss_X.transform(X_test)

y_train=ss_y.fit_transform(y_train.reshape(-1,1))
y_test=ss_y.transform(y_test.reshape(-1,1))

from sklearn.linear_model import LinearRegression
lr=LinearRegression()
lr.fit(X_train,y_train)
lr_y_predict=lr.predict(X_test)
from sklearn.linear_model import SGDRegressor
sgdr=SGDRegressor()
sgdr.fit(X_train,y_train.ravel())   #SGDRegressor expects a 1D target
sgdr_y_predict=sgdr.predict(X_test)
print("LinearRegression score:",lr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,lr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(lr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(lr_y_predict)))
print("SGDRegressor:",sgdr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,sgdr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(sgdr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(sgdr_y_predict)))
#Not sure why inverse_transform has no effect???
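
A quick check of the comment above (a sketch using the variables already defined; the point is that R-squared is a ratio of variances, so the linear rescaling that StandardScaler applies cancels out, while MSE and MAE do depend on the units of y):

from sklearn.metrics import r2_score
print(r2_score(y_test,lr_y_predict))
print(r2_score(ss_y.inverse_transform(y_test),ss_y.inverse_transform(lr_y_predict)))
#both lines print the same value: inverse_transform changes MSE/MAE but not R-squared
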
#Support vector machine (regression)
from sklearn.datasets import load_boston
boston=load_boston()
#.DESCR shows the dataset description
from sklearn.model_selection import train_test_split
import numpy as np
X=boston.data
y=boston.target
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=33,test_size=0.25)
#The max, min, and median values differ widely, so the data needs standardizing
from sklearn.preprocessing import StandardScaler
ss_X=StandardScaler()
ss_y=StandardScaler()
X_train=ss_X.fit_transform(X_train)
X_test=ss_X.transform(X_test)
y_train=ss_y.fit_transform(y_train.reshape(-1,1))
y_test=ss_y.transform(y_test.reshape(-1,1))

#SVR with a linear kernel
from sklearn.svm import SVR
linear_svr=SVR(kernel='linear')
linear_svr.fit(X_train,y_train.ravel())   #SVR expects a 1D target
linear_svr_y_predict=linear_svr.predict(X_test)

#SVR with a polynomial kernel
poly_svr=SVR(kernel='poly')
poly_svr.fit(X_train,y_train.ravel())
poly_svr_y_predict=poly_svr.predict(X_test)

#SVR with an RBF (radial basis function) kernel
rbf_svr=SVR(kernel='rbf')
rbf_svr.fit(X_train,y_train.ravel())
rbf_svr_y_predict=rbf_svr.predict(X_test)

print("linear_svr:",linear_svr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,linear_svr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(linear_svr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(linear_svr_y_predict)))
print("poly_svr:",poly_svr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,poly_svr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(poly_svr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(poly_svr_y_predict)))

print("rbf_svr:",rbf_svr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,rbf_svr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(rbf_svr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(rbf_svr_y_predict)))
#K-nearest-neighbor regression
from sklearn.neighbors import KNeighborsRegressor
#Plain arithmetic mean of the neighbors
uni_knr=KNeighborsRegressor(weights='uniform')
uni_knr.fit(X_train,y_train)
uni_knr_y_predict=uni_knr.predict(X_test)

#Distance-weighted average of the neighbors
dis_knr=KNeighborsRegressor(weights='distance')
dis_knr.fit(X_train,y_train)
dis_knr_y_predict=dis_knr.predict(X_test)

print("uni_knr:",uni_knr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,uni_knr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(uni_knr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(uni_knr_y_predict)))
print("dis_knr:",dis_knr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,dis_knr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dis_knr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dis_knr_y_predict)))

#Regression tree
from sklearn.tree import DecisionTreeRegressor
dtr=DecisionTreeRegressor()
dtr.fit(X_train,y_train)
dtr_y_predict=dtr.predict(X_test)

print("dtr:",dtr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,dtr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dtr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dtr_y_predict)))
#Ensemble models: random forest, extremely randomized trees, gradient boosting trees
from sklearn.ensemble import RandomForestRegressor,ExtraTreesRegressor,GradientBoostingRegressor
#Random forest
dfr=RandomForestRegressor()
dfr.fit(X_train,y_train)
dfr_y_predict=dfr.predict(X_test)
#Extremely randomized trees
etr=ExtraTreesRegressor()
etr.fit(X_train,y_train)
etr_y_predict=etr.predict(X_test)
#Gradient boosting trees
gbr=GradientBoostingRegressor()
gbr.fit(X_train,y_train)
gbr_y_predict=gbr.predict(X_test)

print("RandomForestRegressor:",dfr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,dfr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dfr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(dfr_y_predict)))
print("ExtraTreesRegressor:",etr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,etr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(etr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(etr_y_predict)))

print("GradientBoostingRegressor:",gbr.score(X_test,y_test))
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
print("R-squared:",r2_score(y_test,gbr_y_predict))
print("MSE:",mean_squared_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(gbr_y_predict)))
print("MAE:",mean_absolute_error(ss_y.inverse_transform(y_test),ss_y.inverse_transform(gbr_y_predict)))

#sort the features by importance; np.sort(...,axis=0) would sort each column independently and break the importance/name pairing
print(sorted(zip(etr.feature_importances_,boston.feature_names)))
  • Analysis workflow:

1. First read in the data, either from an online URL or from the sklearn datasets module.

2. Data preprocessing: check whether any values are missing, then drop or fill in the missing entries. Both None and NaN can mark missing data: None is built into Python, while NaN comes from pandas/numpy. None is a special type of its own, whereas NaN is a special float; in most pandas operations both end up treated as missing values (None is converted to NaN). Next, split the dataset into training and test sets with sklearn's train_test_split. Then import StandardScaler from sklearn and standardize the data: fit_transform is used on the training data and transform on the test data (fit_transform fits first, then transforms). If fit_transform is applied to the test data as well, the final results will be heavily skewed.

3. Import the corresponding estimator from sklearn, then fit and predict.

4. Use classification_report and similar functions to output the final evaluation. (A consolidated sketch of these four steps follows below.)
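
A minimal end-to-end sketch of the four steps above (the iris data and LogisticRegression are arbitrary choices for illustration):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

X,y=load_iris(return_X_y=True)   #step 1: read in the data
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=33)   #step 2: split
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)   #fit the scaler on the training data only...
X_test=scaler.transform(X_test)         #...then reuse those statistics on the test data
clf=LogisticRegression()                #step 3: fit and predict
clf.fit(X_train,y_train)
print(classification_report(y_test,clf.predict(X_test)))   #step 4: evaluate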

2. Unsupervised Learning Code

#Unsupervised learning: K-means
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
digits_train=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tra',header=None)
digits_test=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tes',header=None)
#Separate the 64 pixel features from the 1-dimensional digit target
X_train=digits_train[np.arange(64)]
y_train=digits_train[64]

X_test=digits_test[np.arange(64)]
y_test=digits_test[64]

from sklearn.cluster import KMeans
kmeans=KMeans(n_clusters=10)
kmeans.fit(X_train)
kmeans_y_predict=kmeans.predict(X_test)
#Two ways to assess clustering quality: ARI and the silhouette coefficient
from sklearn import metrics
print(metrics.adjusted_rand_score(y_test,kmeans_y_predict))
#Using the silhouette coefficient
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
#Split the figure into a 3x2 grid of subplots and draw in subplot 1
plt.subplot(3,2,1)
#Initialize the raw data points
x1=np.array([1,2,3,1,5,6,5,5,6,7,8,9,7,9])
x2=np.array([1,3,2,2,8,6,7,6,7,1,2,1,1,3])
#zip packs up the list elements pairwise into tuples such as (1,1)
X=np.array(list(zip(x1,x2))).reshape(len(x1),2)
#Plot the raw data distribution in subplot 1
#xlim((xmin,xmax)) sets the x-axis range; ylim works the same way
plt.xlim([0, 10])
plt.ylim([0, 10])
plt.title('Instances')
plt.scatter(x1, x2)
#plt.show()
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'b']
markers = ['o', 's', 'D', 'v', '^', 'p', '*', '+']

clusters = [2, 3, 4, 5, 8]
subplot_counter=1
sc_scores=[]
for t in clusters:
    subplot_counter+=1
    plt.subplot(3,2,subplot_counter)
    kmeans_model=KMeans(n_clusters=t).fit(X)
    #enumerate turns a sequence into (index, value) pairs, e.g. l=['k','m'] yields (0,'k'),(1,'m')
    for i,l in enumerate(kmeans_model.labels_):
        plt.plot(x1[i],x2[i],color=colors[l],marker=markers[l],ls='None')
    plt.xlim([0,10])
    plt.ylim([0,10])
    sc_score=silhouette_score(X,kmeans_model.labels_,metric='euclidean')
    #print("sc_score:",sc_score)
    sc_scores.append(sc_score)
    #print("sc_score:",sc_scores)
    #print(len(sc_scores))
    plt.title('K=%s, silhouette coefficient=%0.03f'%(t,sc_score))   #inside the loop, so each subplot gets its own title
plt.figure()
plt.plot(clusters,sc_scores,'*-')
plt.xlabel('Number of Clusters')
plt.ylabel('silhouette coefficient Score')
plt.show()
#Using the elbow method
import numpy as np
from sklearn.cluster import KMeans
#scipy's distance module, used here to compute Euclidean distances
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
#uniform(low,high,(2,10)) draws a 2x10 array of random reals between low and high
#three separated ranges (following the book's elbow example) so that the elbow shows up at k=3
clusters1=np.random.uniform(0.5,1.5,(2,10))
clusters2=np.random.uniform(5.5,6.5,(2,10))
clusters3=np.random.uniform(3,4,(2,10))
#np.stack() stacks data along a given axis; e.g. for array=[[1,2],[3,4]], np.stack(arrays,axis=1)=[[1,3],[2,4]], while axis=0 keeps the original layout
#np.hstack() stacks column-wise
#np.vstack() stacks row-wise (axis=0)
#axis=0 means along each column / by row label; axis=1 means along each row / by column label
X=np.hstack((clusters1,clusters2,clusters3)).T
plt.scatter(X[:,0],X[:,1])
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
#Try different numbers of cluster centers
K=range(1,10)
meandistortions=[]
for k in K:
    kmeans=KMeans(n_clusters=k)
    kmeans.fit(X)
    meandistortions.append(sum(np.min(cdist(X,kmeans.cluster_centers_,'euclidean'),axis=1))/ X.shape[0])
plt.plot(K,meandistortions,'bx-')
plt.xlabel('k')
plt.ylabel('Average Dispersion')
plt.title('Selecting k with the Elbow Method')
plt.show()
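
The meandistortions line above packs several operations together; this minimal sketch (toy arrays invented for illustration) shows what cdist plus np.min(axis=1) computes:

import numpy as np
from scipy.spatial.distance import cdist
A=np.array([[0,0],[1,1]])
B=np.array([[0,1],[2,2]])
D=cdist(A,B,'euclidean')   #2x2 matrix: D[i,j] is the distance from A[i] to B[j]
print(np.min(D,axis=1))    #for each point in A, the distance to its nearest point in B
#so meandistortions averages each sample's distance to its nearest cluster center
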
#Dimensionality reduction: principal component analysis
import numpy as np
M=np.array([[1,2],[2,4]])
#Compute the rank of the matrix (1 here, since the second row is twice the first)
np.linalg.matrix_rank(M,tol=None)
#PCA
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
digits_train=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tra',header=None)
digits_test=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/optdigits/optdigits.tes',header=None)
#Separate the 64 pixel features from the 1-dimensional digit target
X_digits=digits_train[np.arange(64)]
y_digits=digits_train[64]
X_train=digits_train[np.arange(64)]
y_train=digits_train[64]

X_test=digits_test[np.arange(64)]
y_test=digits_test[64]
from sklearn.decomposition import PCA
estimator=PCA(n_components=2)
X_pca=estimator.fit_transform(X_digits)
from matplotlib import pyplot as plt
def plot_pca_scatter():
    colors=['black','blue','purple','yellow','white','red','lime','cyan','orange','gray']
    #xrange behaved like range but returned a lazy iterator; in Python 3 it was merged into range
    for i in range(len(colors)):
        px=X_pca[:,0][y_digits.values==i]   #boolean selection: keep the rows where y_digits equals i (one color per digit)
        py=X_pca[:,1][y_digits.values==i]
        plt.scatter(px,py,c=colors[i])
    plt.legend(np.arange(0,10).astype(str))
    plt.xlabel('First Principal Component')
    plt.ylabel('Second Principal Component')
    plt.show()
plot_pca_scatter()

from sklearn.svm import LinearSVC
svc=LinearSVC()
svc.fit(X_train,y_train)
svc_y_predict=svc.predict(X_test)

#Compress the images from 64 dimensions down to 20
estimator=PCA(n_components=20)
pca_X_train=estimator.fit_transform(X_train)
pca_X_test=estimator.transform(X_test)

pca_svc=LinearSVC()
pca_svc.fit(pca_X_train,y_train)
pca_y_predict=pca_svc.predict(pca_X_test)

from sklearn.metrics import classification_report
print("svc:",svc.score(X_test,y_test))
print('svc classification report:',classification_report(y_test,svc_y_predict,target_names=np.arange(10).astype(str)))
print("pca_svc:",pca_svc.score(pca_X_test,y_test))
print('pca_svc classification report:',classification_report(y_test,pca_y_predict,target_names=np.arange(10).astype(str)))
#fit_transform vs transform affects the results: transform standardizes using the statistics already fit on the training data
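
One way to quantify how much signal the 20 components above retain is PCA's explained_variance_ratio_ attribute (a sketch using the estimator already fit above):

print(estimator.explained_variance_ratio_.sum())   #fraction of the total variance kept by the 20 components
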
  • Things learned:

enumerate turns a sequence into (index, value) pairs, so you can see both the index and the content: e.g. l=['k','m'] yields (0,'k'),(1,'m').

zip packs up list elements pairwise into tuples such as (1,1).

Two ways to assess correctness in unsupervised learning: ARI and the silhouette coefficient.

ARI applies when the dataset being evaluated carries ground-truth class labels; it can be computed directly with metrics.adjusted_rand_score(y_test,y_predict).

Silhouette coefficient: first compute the mean distance from sample i to all other samples in its own cluster, call it a. Then compute the mean distance from sample i to the samples of each other cluster and take the smallest of these means, call it b. The silhouette coefficient is sc = (b - a) / max(a, b).
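
A minimal numeric check of this formula against sklearn (toy data invented, two well-separated clusters):

import numpy as np
from sklearn.metrics import silhouette_samples,silhouette_score
X=np.array([[0.0,0.0],[0.0,1.0],[10.0,10.0],[10.0,11.0]])
labels=np.array([0,0,1,1])
#for sample 0: a=1 (distance to the other point in its cluster),
#b=(sqrt(200)+sqrt(221))/2~14.50 (mean distance to the other cluster)
print(silhouette_samples(X,labels)[0])   #~0.931, i.e. (b-a)/max(a,b)
print(silhouette_score(X,labels))        #the mean silhouette over all samples
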

  • Open questions (unresolved)

1. Not sure why inverse_transform has no effect: for some data it changes the results, for others it does not. (One likely explanation, checked in the sketch after the linear-regression section: R-squared is invariant under the linear rescaling that StandardScaler applies, so inverse_transform leaves r2_score and .score() unchanged, while it does change MSE and MAE.)


Reposted from blog.csdn.net/BRAVE_NO1/article/details/88390448