Machine Learning Basics - Principal Component Analysis (PCA) - 16

PCA (Principal Component Analysis)

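PCA projects centered data onto the directions of largest variance. The steps the code below implements are: center each feature, form the covariance matrix of the centered data, eigendecompose it, keep the eigenvectors with the largest eigenvalues, and project onto them. In symbols (a standard formulation, with one sample per row of $X$):

$$C = \frac{1}{n-1} X_c^\top X_c, \qquad C w_i = \lambda_i w_i,$$

where $X_c$ is the centered data; the eigenvectors $w_1, \dots, w_k$ with the $k$ largest eigenvalues form the projection matrix $W$, and the low-dimensional representation is $Y = X_c W$.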

PCA - a simple example

import numpy as np
import matplotlib.pyplot as plt
# load the data
data = np.genfromtxt("data.csv", delimiter=",")
x_data = data[:,0]
y_data = data[:,1]
plt.scatter(x_data,y_data)
plt.show()
print(x_data.shape)


# center the data
def zeroMean(dataMat):
    # take the mean of each column, i.e. the mean of each feature
    meanVal = np.mean(dataMat, axis=0)
    newData = dataMat - meanVal
    return newData, meanVal
newData, meanVal = zeroMean(data)
# np.cov computes the covariance matrix; rowvar=0 means each row is one sample
covMat = np.cov(newData, rowvar=0)
# covariance matrix
covMat
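As a quick sanity check (a sketch, reusing newData from above): np.cov normalizes by $n-1$ by default, so the result should match the explicit formula $X_c^\top X_c / (n-1)$.

# should print True: np.cov with rowvar=0 equals X_c^T X_c / (n-1)
print(np.allclose(covMat, newData.T.dot(newData) / (len(newData) - 1)))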


# np.linalg.eig returns the eigenvalues and eigenvectors of a matrix
eigVals, eigVects = np.linalg.eig(np.mat(covMat))
# eigenvalues
eigVals


# eigenvectors (one per column)
eigVects
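Because a covariance matrix is always symmetric, np.linalg.eigh is the more robust choice in practice: it guarantees real eigenvalues and returns them in ascending order. A sketch of the equivalent call:

# eigh is specialized for symmetric/Hermitian matrices;
# eigenvalues come back already sorted in ascending order
eigVals_h, eigVects_h = np.linalg.eigh(covMat)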


# indices that would sort the eigenvalues from smallest to largest
eigValIndice = np.argsort(eigVals)
eigValIndice


top = 1
# indices of the top largest eigenvalues
n_eigValIndice = eigValIndice[-1:-(top+1):-1]
n_eigValIndice
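The slice [-1:-(top+1):-1] walks the sorted index array backwards, so it picks out the indices of the top largest eigenvalues, largest first. A tiny example (hypothetical values, just to illustrate the slicing):

idx = np.argsort(np.array([0.5, 3.0, 1.2]))  # -> array([0, 2, 1])
print(idx[-1:-(2+1):-1])                     # top=2 -> [1 2], largest eigenvalue first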


# eigenvectors corresponding to the top largest eigenvalues
n_eigVect = eigVects[:,n_eigValIndice]
n_eigVect


# project the data into the low-dimensional feature space
lowDDataMat = newData*n_eigVect
lowDDataMat


# reconstruct the data from the low-dimensional representation
reconMat = (lowDDataMat*n_eigVect.T) + meanVal
reconMat
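In matrix form, with $W$ the matrix of kept eigenvectors and $\mu$ the per-feature means, these two steps are:

$$Y = X_c W, \qquad X_{\text{rec}} = Y W^\top + \mu.$$

Because the eigenvectors are orthonormal, $W^\top W = I$, and with top = 1 every reconstructed point lies on the line through the mean along the first principal axis.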


# load the original data again
data = np.genfromtxt("data.csv", delimiter=",")
x_data = data[:,0]
y_data = data[:,1]
plt.scatter(x_data, y_data)

# reconstructed data, plotted in red on top of the original
x_data = np.array(reconMat)[:,0]
y_data = np.array(reconMat)[:,1]
plt.scatter(x_data, y_data, c='r')
plt.show()
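To quantify how much information the 1-D projection discards, the mean squared reconstruction error can be computed directly (a sketch, reusing data and reconMat from above):

# average squared distance between original and reconstructed points
mse = np.mean(np.square(np.array(reconMat) - data))
print(mse)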


Handwritten digit recognition and dimensionality reduction visualization

from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
digits = load_digits() # load the digits dataset: 1797 samples, 8x8 images flattened to 64 features
x_data = digits.data   # features
y_data = digits.target # labels

# default split: 1/4 test data, 3/4 training data
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)
x_data.shape


mlp = MLPClassifier(hidden_layer_sizes=(100,50), max_iter=500)
mlp.fit(x_train, y_train)


# center the data
def zeroMean(dataMat):
    # take the mean of each column, i.e. the mean of each feature
    meanVal = np.mean(dataMat, axis=0)
    newData = dataMat - meanVal
    return newData, meanVal

def pca(dataMat, top):
    # center the data
    newData, meanVal = zeroMean(dataMat)
    # np.cov computes the covariance matrix; rowvar=0 means each row is one sample
    covMat = np.cov(newData, rowvar=0)
    # np.linalg.eig returns the eigenvalues and eigenvectors of the matrix
    eigVals, eigVects = np.linalg.eig(np.mat(covMat))
    # indices that would sort the eigenvalues from smallest to largest
    eigValIndice = np.argsort(eigVals)
    # indices of the top largest eigenvalues
    n_eigValIndice = eigValIndice[-1:-(top+1):-1]
    # eigenvectors corresponding to the top largest eigenvalues
    n_eigVect = eigVects[:,n_eigValIndice]
    # project the data into the low-dimensional feature space
    lowDDataMat = newData*n_eigVect
    # reconstruct the data from the low-dimensional representation
    reconMat = (lowDDataMat*n_eigVect.T) + meanVal
    # return the low-dimensional data and the reconstructed matrix
    return lowDDataMat, reconMat
lowDDataMat, reconMat = pca(x_data, 2)
# the 2-D PCA projection of the digits
x = np.array(lowDDataMat)[:,0]
y = np.array(lowDDataMat)[:,1]
plt.scatter(x, y, c='r')
plt.show()


predictions = mlp.predict(x_data) # predictions on the full dataset (not used for coloring below)
# the same 2-D projection, colored by the true labels
x = np.array(lowDDataMat)[:,0]
y = np.array(lowDDataMat)[:,1]
plt.scatter(x, y, c=y_data)
plt.show()
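Since predictions was computed above, the same scatter can be colored by the model's predicted labels instead, which makes any misclassified regions visible (a sketch reusing the variables above):

# color each projected point by the MLP's predicted class
plt.scatter(x, y, c=predictions)
plt.show()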


lowDDataMat, reconMat = pca(x_data, 3)
from mpl_toolkits.mplot3d import Axes3D
x = np.array(lowDDataMat)[:,0]
y = np.array(lowDDataMat)[:,1]
z = np.array(lowDDataMat)[:,2]
ax = plt.figure().add_subplot(111, projection='3d')
ax.scatter(x, y, z, c=y_data, s=10) # color each point by its digit label

plt.show()


Sklearn - handwritten digit dimensionality reduction and prediction

from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from sklearn import decomposition
import matplotlib.pyplot as plt
digits = load_digits() # load the digits dataset
x_data = digits.data   # features
y_data = digits.target # labels

# default split: 1/4 test data, 3/4 training data
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)
mlp = MLPClassifier(hidden_layer_sizes=(100,50), max_iter=500)
mlp.fit(x_train, y_train)


predictions = mlp.predict(x_test)
# sklearn's metrics expect (y_true, y_pred) in that order
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))


# fit PCA with the default settings, which keeps all 64 components
pca = decomposition.PCA()
pca.fit(x_data)


# variance explained by each component
pca.explained_variance_


# fraction of the total variance explained by each component
pca.explained_variance_ratio_


# cumulative fraction of variance explained by the first i components
variance = []
for i in range(len(pca.explained_variance_ratio_)):
    variance.append(sum(pca.explained_variance_ratio_[:i+1]))
plt.plot(range(1, len(pca.explained_variance_ratio_) + 1), variance)
plt.show()
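With the cumulative curve in hand, the number of components needed to reach a given variance threshold can be read off directly (a sketch using the variance list built above):

# first count (1-based) at which the cumulative ratio reaches 80%
n_components_80 = next(i for i, v in enumerate(variance, 1) if v >= 0.8)
print(n_components_80)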


# a float n_components keeps just enough components to explain that fraction (80%) of the
# variance; whiten=True rescales the retained components to unit variance
pca = decomposition.PCA(whiten=True, n_components=0.8)
pca.fit(x_data)


# variance ratios of the components that were kept
pca.explained_variance_ratio_


# train the MLP on the PCA-reduced training data
x_train_pca = pca.transform(x_train)
mlp = MLPClassifier(hidden_layer_sizes=(100,50), max_iter=500)
mlp.fit(x_train_pca, y_train)


# apply the same PCA transform to the test data, then evaluate
x_test_pca = pca.transform(x_test)
predictions = mlp.predict(x_test_pca)
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
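For a single-number comparison between the model trained on all 64 features and the one trained on the PCA-reduced features, accuracy_score can be used (a sketch; the import is an addition, not part of the original script):

from sklearn.metrics import accuracy_score
# overall accuracy of the PCA-based model on the test set
print(accuracy_score(y_test, predictions))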


Origin: blog.csdn.net/qq_37978800/article/details/113859937