
Understanding the SVM algorithm

Solving the problem with an SVM using polynomial features

import numpy as np 
import matplotlib.pyplot as plt 
from sklearn import datasets 

X, y = datasets.make_moons()  # use a generated toy dataset
print(X.shape)  # (100, 2)
print(y.shape)  # (100,)

# regenerate with Gaussian noise; random_state is the random seed, noise is the standard deviation of the noise
X, y = datasets.make_moons(noise=0.15, random_state=777)
plt.scatter(X[y==0,0], X[y==0,1]) 
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()
(100, 2)
(100,)

Next we classify this data with an SVM on polynomial features. Here we introduce a Pipeline, which chains several processing steps together, for example feature extraction, normalization, and classification, into a typical machine-learning workflow.
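
As a minimal sketch of the Pipeline idea (using a plain LinearSVC without polynomial features, purely for illustration; the full polynomial pipeline is built below), each step is a (name, estimator) pair and fit/predict run the steps in order:

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

pipe = Pipeline([
    ("std_scaler", StandardScaler()),  # step 1: standardize the features
    ("clf", LinearSVC(C=1.0)),         # step 2: any estimator with fit/predict
])
pipe.fit(X, y)                          # fit_transform each transformer, then fit the final estimator
print(pipe.predict(X[:5]))              # predict passes the data through the fitted steps
print(pipe.named_steps["std_scaler"].mean_)  # individual steps stay accessible by name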

Visualizing the results

def plot_decision_boundary(model, axis): 
    # build a dense grid covering axis = [x0_min, x0_max, x1_min, x1_max]
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1]-axis[0])*100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3]-axis[2])*100)).reshape(-1, 1),
    )
    X_new = np.c_[x0.ravel(), x1.ravel()] 
    y_predict = model.predict(X_new) 
    zz = y_predict.reshape(x0.shape) 
    from matplotlib.colors import ListedColormap 
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9']) 
    plt.contourf(x0, x1, zz, cmap=custom_cmap)  # draw the decision regions

from sklearn.preprocessing import PolynomialFeatures, StandardScaler 
from sklearn.svm import LinearSVC 
from sklearn.pipeline import Pipeline 

def PolynomialSVC(degree, C=1.0): 
    return Pipeline([
        ("poly", PolynomialFeatures(degree=degree)),
        ("std_scaler", StandardScaler()),
        ("linearSVC", LinearSVC(C=C)),
    ])

poly_svc = PolynomialSVC(degree=3) 
poly_svc.fit(X, y) 
plot_decision_boundary(poly_svc, axis=[-1.5, 2.5, -1.0, 1.5])
plt.scatter(X[y==0,0], X[y==0,1]) 
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()

[Figure: decision boundary of the degree-3 polynomial SVM on the moons data]

As you can see, the resulting boundary is no longer a straight line. We can also use the kernel trick: it implicitly lifts the data into a higher-dimensional space, so data that is not linearly separable in the original space becomes linearly separable there, and a linear SVM can then handle it.
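
As a hedged aside, a minimal sketch of the kernelized alternative: instead of expanding the features explicitly with PolynomialFeatures, sklearn's SVC can apply a polynomial kernel directly (the degree and coef0 values below are illustrative, not tuned):

from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

def PolynomialKernelSVC(degree, C=1.0):
    # same idea as PolynomialSVC above, but the polynomial mapping is done
    # implicitly by the kernel instead of by PolynomialFeatures
    return Pipeline([
        ("std_scaler", StandardScaler()),
        ("kernelSVC", SVC(kernel="poly", degree=degree, coef0=1, C=C)),
    ])

poly_kernel_svc = PolynomialKernelSVC(degree=3)
poly_kernel_svc.fit(X, y)
plot_decision_boundary(poly_kernel_svc, axis=[-1.5, 2.5, -1.0, 1.5])
plt.scatter(X[y==0,0], X[y==0,1])
plt.scatter(X[y==1,0], X[y==1,1])
plt.show()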

SVM with a kernel function

Generating the dataset

import numpy as np 
import matplotlib.pyplot as plt
from sklearn import datasets 
X,y = datasets.make_moons(noise=0.15,random_state=777)
plt.scatter(X[y==0,0],X[y==0,1]) 
plt.scatter(X[y==1,0],X[y==1,1]) 
plt.show()

[Figure: scatter plot of the noisy moons dataset (output_7_0.png)]

Now use a Gaussian (RBF) kernel, with gamma set to 100.
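
For reference, the RBF kernel computes K(x, z) = exp(-gamma * ||x - z||^2), so a larger gamma makes each training point's influence more local and the decision boundary more wiggly.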

from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC 
from sklearn.pipeline import Pipeline 

def RBFKernelSVC(gamma=1.0): 
    return Pipeline([
        ('std_scaler', StandardScaler()),
        ('svc', SVC(kernel='rbf', gamma=gamma)),
    ])

svc = RBFKernelSVC(100) 
svc.fit(X, y) 
plot_decision_boundary(svc, axis=[-1.5, 2.5, -1.0, 1.5]) 
plt.scatter(X[y==0,0], X[y==0,1]) 
plt.scatter(X[y==1,0], X[y==1,1]) 
plt.show()

[Figure: RBF-kernel SVM decision boundary with gamma=100]

The trained model is clearly overfitting. Now set gamma to 0.1 instead.

svc = RBFKernelSVC(0.1) 
svc.fit(X,y) 
plot_decision_boundary(svc,axis=[-1.5,2.5,-1.0,1.5]) 
plt.scatter(X[y==0,0],X[y==0,1]) 
plt.scatter(X[y==1,0],X[y==1,1]) 
plt.show()

[Figure: RBF-kernel SVM decision boundary with gamma=0.1]

This time the trained model underfits. The gamma parameter therefore acts as a knob on model complexity, and in practice it should be tuned to the data at hand.
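
A minimal sketch of how gamma could be tuned with cross-validation (the candidate values below are arbitrary, chosen only for illustration):

from sklearn.model_selection import GridSearchCV

# search over a few candidate gamma values with 5-fold cross-validation;
# note the double underscore: <step name>__<parameter name> inside a Pipeline
param_grid = {"svc__gamma": [0.01, 0.1, 1.0, 10.0, 100.0]}
grid = GridSearchCV(RBFKernelSVC(), param_grid, cv=5)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)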

Face detection and drawing the 68 facial landmarks

Required libraries and brightness adjustment:

import numpy as np
import cv2
import dlib
import os
import sys
import random

# output location for saved images
output_dir = 'D:/faces'
size = 64

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# adjust the brightness and contrast of an image: new_pixel = pixel * light + bias
def relight(img, light=1, bias=0):
    w = img.shape[1]
    h = img.shape[0]
    for i in range(0, w):
        for j in range(0, h):
            for c in range(3):
                tmp = int(img[j, i, c] * light + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j, i, c] = tmp
    return img
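
As an aside, the pixel-by-pixel loops above are very slow in Python; a minimal vectorized sketch with essentially the same light/bias semantics (assuming a uint8 BGR image) could look like this:

def relight_fast(img, light=1, bias=0):
    # scale and shift all channels at once, then clamp to the valid 0-255 range
    out = img.astype(np.float32) * light + bias
    return np.clip(out, 0, 255).astype(np.uint8)

OpenCV's cv2.convertScaleAbs(img, alpha=light, beta=bias) gives a similar result for non-negative parameters.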

Landmark detection and drawing

# use dlib's frontal_face_detector as the face detector
detector = dlib.get_frontal_face_detector()
# load the 68-point landmark predictor
predictor = dlib.shape_predictor('D:/myworks/shape_predictor_68_face_landmarks.dat')
# open the camera; the argument is the input stream, either a camera index or a video file
camera = cv2.VideoCapture(0)
#camera = cv2.VideoCapture('C:/Users/CUNGU/Videos/Captures/wang.mp4')
ok = True
j = 0
while ok:
    # read a frame from the camera; ok indicates whether the read succeeded
    ok, img = camera.read()
    if not ok:
        break
    # convert to grayscale for detection
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    rects = detector(img_gray, 0)

    for i in range(len(rects)):
        landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
        for idx, point in enumerate(landmarks):
            # coordinates of each of the 68 landmarks
            pos = (point[0, 0], point[0, 1])
            if j < 68:
                # only print the 68 points of the first detected face
                print(idx, pos)

            # use cv2.circle to draw a small circle at each of the 68 landmarks
            cv2.circle(img, pos, 2, color=(0, 255, 0))
            # use cv2.putText to label the landmarks 1-68
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(idx+1), pos, font, 0.2, (0, 0, 255), 1, cv2.LINE_AA)
            j = j + 1
    cv2.imshow('video', img)
    k = cv2.waitKey(1)
    if k == 27:    # press 'ESC' to quit
        break

camera.release()
cv2.destroyAllWindows()

The extracted landmark coordinates are as follows:
0 (253, 125)
1 (256, 150)
2 (259, 174)
3 (263, 197)
4 (273, 218)
5 (288, 235)
6 (307, 249)
7 (329, 261)
8 (352, 264)
9 (374, 262)
10 (394, 250)
11 (411, 235)
12 (425, 218)
13 (434, 198)
14 (438, 176)
15 (442, 153)
16 (443, 130)
17 (277, 102)
18 (289, 91)
19 (306, 87)
20 (324, 90)
21 (340, 97)
22 (373, 98)
23 (388, 91)
24 (405, 90)
25 (420, 94)
26 (431, 105)
27 (357, 122)
28 (357, 135)
29 (358, 148)
30 (358, 162)
31 (338, 177)
32 (347, 179)
33 (357, 182)
34 (365, 180)
35 (374, 177)
36 (296, 125)
37 (306, 120)
38 (319, 120)
39 (329, 127)
40 (318, 129)
41 (306, 129)
42 (381, 127)
43 (392, 121)
44 (403, 121)
45 (413, 127)
46 (404, 131)
47 (392, 130)
48 (320, 212)
49 (333, 203)
50 (347, 198)
51 (356, 200)
52 (364, 198)
53 (376, 202)
54 (387, 211)
55 (376, 221)
56 (364, 225)
57 (355, 226)
58 (346, 226)
59 (332, 222)
60 (326, 212)
61 (347, 209)
62 (356, 209)
63 (364, 209)
64 (381, 212)
65 (364, 212)
66 (356, 213)
67 (346, 213)

The resulting annotated image is shown below.
[Figure: webcam frame with the 68 landmarks drawn and numbered]

Instead of drawing the landmark points, now virtually "photoshop" a pair of sunglasses onto the face

Import the required libraries and the image brightness adjustment (same as above)

import numpy as np
import cv2
import dlib
import os
import sys
import random

# output location for saved images
output_dir = 'D:/faces'
size = 64

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# adjust the brightness and contrast of an image
def relight(img, light=1, bias=0):
    w = img.shape[1]
    h = img.shape[0]
    for i in range(0, w):
        for j in range(0, h):
            for c in range(3):
                tmp = int(img[j, i, c] * light + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j, i, c] = tmp
    return img

Image processing (adding the "sunglasses")

# use dlib's frontal_face_detector as the face detector
detector = dlib.get_frontal_face_detector()
# use the 5-point landmark model, which makes it easy to pick out the eye corners
predictor = dlib.shape_predictor('D:/myworks/shape_predictor_5_face_landmarks.dat')
camera = cv2.VideoCapture(0)
ok = True
j = 0
while ok:
    ok, img = camera.read()
    if not ok:
        break
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    pot1 = (0, 0)
    pot2 = (0, 0)
    t = (0, 0)
    rects = detector(img_gray, 0)
    for i in range(len(rects)):
        landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
        r = 0
        for idx, point in enumerate(landmarks):
            pos = (point[0, 0], point[0, 1])
            # points 0-3 are the eye corners; pair them up to locate each eye
            if idx in (0, 2):
                pot1 = pos
            if idx in (1, 3):
                if idx == 1:
                    t = pos
                pot2 = pos
                x = abs(pot1[0] + pot2[0])
                y = abs(pot1[1] + pot2[1])
                if idx == 1:
                    # rough size estimate used to scale the lens radius
                    r = np.sqrt(abs(np.square(x) - np.square(y)))
                # draw a filled black circle (a 'lens') centred between the two saved corners
                temp = (int(x/2), int(y/2))
                cv2.circle(img, temp, int(r/20), color=(0, 0, 0), thickness=-1)
                # connect the current corner to the one saved at idx == 1 with a thick black line (the frame)
                cv2.line(img, pot2, t, color=(0, 0, 0), thickness=5)
            j = j + 1
    cv2.imshow('video', img)
    k = cv2.waitKey(1)
    if k == 27:    # press 'ESC' to quit
        break

camera.release()
cv2.destroyAllWindows()

The result looks like this:

[Figure: webcam frame with black "sunglasses" drawn over the eyes]


Reprinted from: blog.csdn.net/weixin_45747542/article/details/121310635