OpenCV Learning Notes 2

2. OpenCV ML

Implementing simple machine-learning algorithms with the ML module provided by OpenCV.

2.1 SVM

The SVM interface changed substantially between OpenCV 2.x and OpenCV 3.x: the API is now an abstract interface built on virtual functions, replacing the old concrete class definitions.
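As a rough sketch of the difference (hedged: the 2.x calls below use the legacy CvSVM/CvSVMParams API and are written from memory, so verify them against your installed headers):

// OpenCV 2.x (legacy): a concrete class configured through a parameter struct
CvSVMParams params;
params.svm_type    = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
CvSVM svm2x;
svm2x.train(trainingDataMat, labelsMat, cv::Mat(), cv::Mat(), params);

// OpenCV 3.x+: an abstract interface obtained from a factory and configured with setters
cv::Ptr<cv::ml::SVM> svm3x = cv::ml::SVM::create();
svm3x->setType(cv::ml::SVM::C_SVC);
svm3x->setKernel(cv::ml::SVM::LINEAR);
svm3x->train(trainingDataMat, cv::ml::ROW_SAMPLE, labelsMat);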

2.1.1 Training example

/* SVM training tutorial */
void test_opencv_svm_train()
{
    // two-class classification
    int labels[4] = { 1, -1, -1, -1 };
    float trainingData[4][2] = { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };

    Mat trainingDataMat(4, 2, CV_32FC1, trainingData);// the training data must be a CV_32FC1 (float) Mat
    Mat labelsMat(4, 1, CV_32SC1, labels);

    Ptr<SVM> svm = SVM::create();     // create an empty SVM model; returns a smart pointer Ptr<SVM>
    svm->setType(SVM::C_SVC);		 // set the SVM type
    /*
    1. C_SVC: C-Support Vector Classification. n-class classification (n >= 2); allows
       imperfect separation of classes with an outlier penalty factor C.
    2. NU_SVC: nu-Support Vector Classification. n-class classification with possible
       imperfect separation; the parameter nu (in the range (0, 1]) is used instead of C.
    3. ONE_CLASS: one-class SVM (distribution estimation). All training data come from the
       same class, and the SVM builds a boundary separating the region that class occupies
       in feature space from the rest of the space.
    4. EPS_SVR: epsilon-Support Vector Regression. The distance between the training
       feature vectors and the fitted hyperplane must be less than p; the outlier penalty
       factor C is used.
    5. NU_SVR: nu-Support Vector Regression. nu is used instead of p.
    */
    svm->setC(1.0);		// set the penalty factor C
    /*
     C is the penalty coefficient, i.e., the tolerance for misclassification.
     A larger C means errors are tolerated less, which makes overfitting more likely;
     a smaller C tends toward underfitting. If C is too large or too small,
     generalization suffers.
    */
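    /* Instead of hand-tuning C, SVM::trainAuto() can grid-search it (together with
       gamma, degree, etc.) via k-fold cross-validation. A sketch; see the
       cv::ml::SVM::trainAuto docs for the parameter grids:
         svm->trainAuto(TrainData::create(trainingDataMat, ml::ROW_SAMPLE, labelsMat));
    */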
    svm->setKernel(SVM::LINEAR); // set the kernel type
    /*
     LINEAR: linear kernel;

       POLY: polynomial kernel;
      / -d sets the degree of the polynomial kernel; default 3
      / -r sets coef0 in the kernel function (the second r in the formula); default 0.
       Odd degrees 1, 3, 5, 7, 9, 11 are common choices; 2, 4, 6, ... also work.

       RBF: radial basis function (Gaussian) kernel;
      / -g sets the kernel's gamma parameter; default 1/k (k = number of classes)
      gamma is a parameter of the RBF kernel itself.
       It implicitly determines the distribution of the data after it is mapped into
       the new feature space: the larger gamma, the fewer support vectors; the smaller
       gamma, the more support vectors. The number of support vectors affects both
       training and prediction speed.

       SIGMOID: sigmoid (neuron activation) kernel;
      / -g sets the kernel's gamma parameter; default 1/k (k = number of classes)
      / -r sets coef0 in the kernel function (the second r in the formula); default 0

       PRECOMPUTED: user-defined kernel
      */
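    /* Note: the -d / -g / -r options above are libsvm-style command-line flags.
       In the OpenCV C++ API they correspond to setters on the SVM object:
         -d (polynomial degree) -> svm->setDegree(...)
         -g (gamma)             -> svm->setGamma(...)
         -r (coef0)             -> svm->setCoef0(...)
         C / nu / p             -> svm->setC(...) / svm->setNu(...) / svm->setP(...)
       The LINEAR kernel used here needs none of them. */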
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));// termination criteria: max iteration count and epsilon

    svm->train(trainingDataMat, ml::SampleTypes::ROW_SAMPLE, labelsMat);// train on the data: train(samples, sample layout, labels)
    																	// ml::SampleTypes::ROW_SAMPLE means each row is one training sample

    //const string save_file = "test.xml"; // .xml, .yaml, .json
    svm->save("test_model.xml");				// save the SVM model
    cout << "saved successfully" << endl;
}

2.1.2 Prediction example

The value returned by predict must be read as a float:

float re = svm->predict(predict_sample); // one sample per call; returns the prediction re

Mat result(sampleCount, 1, CV_32FC1); // one float response per sample

float re1 = svm->predict(predict_samples, result); // batch prediction: per-sample responses are written to result

Read each test sample's prediction with result.at<float>(i).

Internally, svm.predict evaluates alpha * K(sv, x) - rho; if the value is negative the sample is treated as a positive sample.
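To read that raw decision value instead of the predicted label, a minimal sketch using the StatModel::RAW_OUTPUT flag (the flag exists on cv::ml::StatModel; the sign convention is worth double-checking on your OpenCV version):

Mat raw;
svm->predict(predict_sample, raw, ml::StatModel::RAW_OUTPUT);// raw decision value, not the label
float decision = raw.at<float>(0, 0);  // sum_i(alpha_i * K(sv_i, x)) - rho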

/* SVM prediction tutorial */
void test_opencv_svm_predict()
{
    const string model_file  = "test_model.xml";
    const int labels[] = { 1, 1, 1, 1, -1, -1, -1, -1 };// test sample labels
    const vector<vector<float>> predictData = { { 490.f, 15.f }, { 480.f, 30.f }, { 511.f, 40.f }, { 473.f, 50.f },
        { 2.f, 490.f }, { 100.f, 200.f }, { 247.f, 223.f }, {510.f, 400.f} };// test samples

    //Mat sampleMat = (Mat_<float>(1, 2) << j, i);
    //const int feature_length = 2;
    const int predict_count = (int)predictData.size();
    double correct_num = 0;
    float correct_acc = 0;
    Ptr<SVM> svm = SVM::load(model_file);// load the model
    for (int i = 0; i < predict_count; i++)
    {
        Mat predictMat = (Mat_<float>(1, 2) << predictData[i][0], predictData[i][1]);
        float response = svm->predict(predictMat);// predict with svm->predict(); read the result as float
        if (labels[i] == response) correct_num++;
        printf("actual class: %d, predicted class: %f\n", labels[i], response);
    }
    correct_acc = (correct_num / predict_count) * 100;
    printf("correct_acc = %.2f%\n",correct_acc);
}
/* Feeding all test samples at once */
const string model_file = "test_model.xml";
const int labels[] = { 1, 1, 1, 1, -1, -1, -1, -1 };
float predictData[8][2] = { { 490.f, 15.f }, { 480.f, 30.f }, { 511.f, 40.f }, { 473.f, 50.f },
                           { 2.f, 490.f }, { 100.f, 200.f }, { 247.f, 223.f }, {510.f, 400.f} };

Mat predictMat(8, 2, CV_32FC1, predictData); // constructing a Mat from external data like this appears to require a plain array
cout << predictMat << endl;

double correct_num = 0;
float correct_acc = 0;
Ptr<SVM> svm = SVM::load(model_file);

Mat responseMat(8, 1, CV_32FC1);             // one float response per sample (CV_8FC1 does not exist)
float rst = svm->predict(predictMat, responseMat);

for (int i = 0; i < 8; i++)
{
    cout << responseMat.at<float>(i, 0) << endl;
    if (responseMat.at<float>(i, 0) == labels[i])
        correct_num++;
}

correct_acc = (correct_num / 8) * 100;
printf("correct_acc = %.2f%%\n", correct_acc);
/** @brief Predicts response(s) for the provided sample(s)

    @param samples The input samples, floating-point matrix
    @param results The optional output matrix of results.
    @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.
     */
CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;
/* Retrieving the parameters of a trained SVM */
void svm_test()
{
    // visual representation
    int width = 512;
    int height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // training data
    int labels[4] = { 1, 0, 0, 0 };
    float trainingData[4][2] = { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };

    Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
    Mat labelsMat(4, 1, CV_32SC1, labels);

    cout << "trainingDataMat:\n" << trainingDataMat << endl;

    // initial SVM
    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::Types::C_SVC);
    svm->setKernel(ml::SVM::KernelTypes::POLY); // use a polynomial kernel so the support vectors are easier to inspect
    svm->setGamma(0.1);
    svm->setDegree(2);
    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));

    // train operation
    svm->train(trainingDataMat, ml::SampleTypes::ROW_SAMPLE, labelsMat);// ml::SampleTypes::ROW_SAMPLE means each row is one training sample

    int svdim = svm->getVarCount(); //Returns the number of variables in training samples
    cout << "svdim:" << svdim << endl;
    // with a linear kernel there is only one "compressed" support vector (see
    // getUncompressedSupportVectors() in 2.1.3: OpenCV collapses a linear SVM's
    // support vectors into a single weighted sum)
    Mat svMat = svm->getSupportVectors();
    cout << "svMat = \n" << svMat << endl
         << "svMat.size = " << svMat.size << endl;
    /*
    svMat = 
        [501, 10;
         255, 10;
         501, 255]
    */
    Mat alphaMat;// = Mat::zeros(sn,svdim,CV_32F);// the weight alpha (Lagrange multiplier) of each support vector; by default alpha is float64
    Mat svidxMat;// = Mat::zeros(1,sn,CV_64F);// indices of the support vectors within the support-vector matrix; how to recover their labels, or their indices in the original training data?
    //The method returns rho parameter of the decision function, a scalar subtracted from the weighted sum of kernel responses.
    double b = svm->getDecisionFunction(0,alphaMat,svidxMat);
    cout << "alphaMat = \n" << alphaMat << endl << alphaMat.size() << endl;
    /*
    alphaMat = 
            [5.883922424074261e-09, 5.926219268115407e-09, -1.181014169218967e-08]
    */
    cout << "svidxMat = \n" << svidxMat << endl;
    /*
    svidxMat = 
   			 [1, 2, 0]
    */
    cout << "b = \n" << b << endl;
    /*
    int sn = svMat.rows;
    cout << "svMat =\n" << svMat << endl;
    printf("svdim = %d\n,svNum = %d\n",svdim,sn);


    Mat weights(1,svdim,CV_32FC1);
    for(int i = 0;i < sn;i++)
    {
        int index = svidxMat.at<int>(0,i);
        //cout << "index: \n" << index << endl;
        weights += alphaMat.at<float>(0,i) * labelsMat.at<int>(index,0) * trainingDataMat.row(index);
    }
    cout << "weights: \n" << weights << endl;

    float bias;
    float temp = 0;
    int j = 3;
    for(int i = 0;i < sn;i++)
    {
        int index = svidxMat.at<int>(0,i);
        j = svidxMat.at<int>(0,j);
        //cout << "index: \n" << index << endl;
        temp += alphaMat.at<float>(0,i) * labelsMat.at<int>(index,0) * (trainingDataMat.row(index).dot(trainingDataMat.row(j)));
    }
    bias = labelsMat.at<int>(j,0) - temp;
    cout << "bias = \n" << bias << endl;

    float w1 = weights.at<float>(0,0);
    float w2 = weights.at<float>(0,1);

    float p1x = 0;
    float p1y = (-bias - w1 * p1x) / w2; cout << p1y << endl;

    float p2x = 250;
    float p2y = (-bias - w1 * p2x) / w2;cout << p2y << endl;

    Point2f p1(p1x,p1y);
    Point2f p2(p2x,p2y);
    line(image,p1,p2,Scalar(0,255,0),1,LINE_8);
    */

    // weights = alpha * label * sv; alpha is stored as float64 by default, so read it as double
    Mat weights = alphaMat.at<double>(0, 0) * 1 * svMat.row(0);
    cout << "weights: \n" << weights << endl;
    float w1 = weights.at<float>(0, 0);
    float w2 = weights.at<float>(0, 1);

    // two points on the line w1*x + w2*y + b = 0
    float p1x = 0;
    float p1y = (-b - w1 * p1x) / w2; cout << p1y << endl;

    float p2x = 250;
    float p2y = (-b - w1 * p2x) / w2; cout << p2y << endl;

    Point2f p1(p1x, p1y);
    Point2f p2(p2x, p2y);
    line(image, p1, p2, Scalar(0, 255, 0), 1, LINE_8);

    // prediction
#if 0
    Vec3b green(0, 255, 0);
    Vec3b blue(255, 0, 0);
    for (int i = 0; i < image.rows; i++)
    {
        for (int j = 0; j < image.cols; j++)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << j, i);
            float response = svm->predict(sampleMat);
//            cout << response << "  ";
            if (response == 1)
                image.at<Vec3b>(i, j) = green;
            else if (response == 0)
                image.at<Vec3b>(i, j) = blue;
        }
//        cout << endl;
    }
#endif

    int thickness = -1;
    int lineType = LineTypes::LINE_8;

    circle(image, Point(501, 10), 5, Scalar(0, 0, 0), thickness, lineType);
    circle(image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType);
    circle(image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType);
    circle(image, Point(10, 501), 5, Scalar(255, 255, 255), thickness, lineType);

    thickness = 2;
    lineType = LineTypes::LINE_8;

    Mat sv = svm->getSupportVectors();
    for (int i = 0; i < sv.rows; i++)
    {
        const float* v = sv.ptr<float>(i);
        circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
    }


    imshow("SVM Simple Example", image);


    waitKey(0);
}

2.1.3 Retrieving SVM parameters after training

Getting the support vectors

/** @brief Retrieves all the support vectors
	Get the support vectors
    The method returns all the support vectors as a floating-point matrix, where support vectors are stored as matrix rows.
*/
CV_WRAP virtual Mat getSupportVectors() const = 0;

/** @brief Retrieves all the uncompressed support vectors of a linear %SVM
	Get the uncompressed support vectors of a linear SVM; after compression only one remains
    The method returns all the uncompressed support vectors of a linear %SVM that the compressed support vector, used for prediction, was derived from. They are returned in a floating-point matrix, where the support vectors are stored as matrix rows.
*/
CV_WRAP Mat getUncompressedSupportVectors() const;
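A minimal sketch contrasting the two getters on a LINEAR-kernel model (assuming svm was trained as in 2.1.1); this also explains the single support vector observed in svm_test above:

Mat sv  = svm->getSupportVectors();              // linear SVM: one row, the compressed vector
Mat usv = svm->getUncompressedSupportVectors();  // the original support vectors, one per row
cout << "compressed: "   << sv.rows  << " row(s), "
     << "uncompressed: " << usv.rows << " row(s)" << endl;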

Getting the decision function

    /** @brief Retrieves the decision function
	Get the decision function; the return value is the bias term rho
    @param i the index of the decision function. If the problem solved is regression, 1-class 
    or 2-class classification, then there will be just one decision function and the index should 
    always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$ decision functions.
    @param alpha the optional output vector for weights, corresponding to different support vectors.
        In the case of linear %SVM all the alpha's will be 1's.
    @param svidx the optional output vector of indices of support vectors within the matrix of
        support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear
        %SVM each decision function consists of a single "compressed" support vector.

    The method returns rho parameter of the decision function, a scalar subtracted from the weighted
    sum of kernel responses.
     */
    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;
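Putting the two getters together, a hedged sketch that rebuilds the hyperplane w.x - rho = 0 of a two-class LINEAR SVM (per the doc above, a linear model stores a single compressed support vector with alpha = 1; sampleMat here is an assumed 1 x dim CV_32F row):

Mat alpha, svidx;
double rho = svm->getDecisionFunction(0, alpha, svidx);
Mat sv = svm->getSupportVectors();  // 1 x dim, CV_32F, for a linear SVM

// With one compressed vector and alpha = 1, the weight vector is the SV itself.
Mat w = sv.row(0);
double f = w.dot(sampleMat) - rho;  // the sign of f selects between the two class labels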

2.2 K-means

  • K-means clustering in OpenCV

  • Experimented with one-dimensional and two-dimensional data (a minimal 1-D sketch follows; the full 2-D demo is further below)
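Since the notes mention one-dimensional data, here is a minimal hedged sketch clustering N random scalars into K groups (N and K are illustrative names):

// Minimal 1-D k-means: N float samples stored in an N x 1 CV_32F Mat.
const int N = 100, K = 3;
Mat points(N, 1, CV_32F), labels, centers;
RNG rng(0);
rng.fill(points, RNG::UNIFORM, Scalar(0), Scalar(100));

double compactness = kmeans(points, K, labels,
                            TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
                            3, KMEANS_PP_CENTERS, centers);
// labels: N x 1 CV_32S cluster index per sample; centers: K x 1 CV_32F cluster means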


#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>

using namespace cv;
using namespace std;

// static void help()
// {
//     cout << "\nThis program demonstrates kmeans clustering.\n"
//             "It generates an image with random points, then assigns a random number of cluster\n"
//             "centers and uses kmeans to move those cluster centers to their representitive location\n"
//             "Call\n"
//             "./kmeans\n" << endl;
// }

void kmeans_test()
{
    const int MAX_CLUSTERS = 5;
    Scalar colorTab[] =
    {
        Scalar(0, 0, 255),
        Scalar(0,255,0),
        Scalar(255,100,100),
        Scalar(255,0,255),
        Scalar(0,255,255)
    };// at most 5 clusters, one color per cluster

    Mat img(500, 500, CV_8UC3);
    RNG rng(12345);
//    for (int i = 1;i < 31;++i)
//    {
//        cout << rng.uniform(2,MAX_CLUSTERS + 1) << " ";
//        if(i % 10 == 0) cout << endl;
//    }

    while(1)
    {
        int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);// pick a cluster count from {2,3,4,5}; k is only declared here, not initialized
        int i, sampleCount = rng.uniform(1, 1001);// number of samples
        Mat points(sampleCount, 1, CV_32FC2), labels;// points is CV_32FC2, so each element is a Vec2f and can be filled via Scalar(a, b)
        clusterCount = MIN(clusterCount, sampleCount);
        std::vector<Point2f> centers;// cluster centers

        /* generate random sample from multigaussian distribution */
        for( k = 0; k < clusterCount; k++ )
        {
            Point center;
            center.x = rng.uniform(0, img.cols);
            center.y = rng.uniform(0, img.rows);
            Mat pointChunk = points.rowRange(k*sampleCount/clusterCount,
                                             k == clusterCount - 1 ? sampleCount :
                                             (k+1)*sampleCount/clusterCount);// take sampleCount/clusterCount samples per cluster (the last chunk absorbs the remainder)
            rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x,center.y), Scalar(img.cols*0.05,img.rows*0.05));
        }

        randShuffle(points, 1, &rng);// shuffle the rows

        double compactness = kmeans(points,                     // data to cluster
                                    clusterCount,               // number of clusters
                                    labels,                     // labels: a 0-based integer cluster index for every sample
                                    TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0),
                                    3,                          // run the clustering 3 times and keep the best attempt
                                    KMEANS_PP_CENTERS,          // KMEANS_PP_CENTERS: center initialization method (k-means++)
                                    centers);                   // output cluster centers
                                                                // returns the compactness measure
        img = Scalar::all(0);
        Mat img2 = img.clone();
        for( i = 0; i < sampleCount; i++ )
        {
            int clusterIdx = labels.at<int>(i);
            Point2f ipt = points.at<Point2f>(i);
            //Point2f p(ipt,250 + clusterIdx*10);
            circle( img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA );
            circle(img2,ipt,2,Scalar::all(255));
        }// draw the clustered samples
        for (i = 0; i < (int)centers.size(); ++i)
        {
            Point2f c = centers[i];
            //Point2f p(c,250 + i*10);
            circle( img, c, 40, colorTab[i], 1, LINE_AA );
        }// draw the cluster centers
        cout << "Compactness: " << compactness << endl;

        imshow("Before_cluster",img2);
        imshow("After_cluster", img);

        char key = (char)waitKey();
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;
    }
}
/** @brief Finds centers of clusters and groups input samples around the clusters.

The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters
and groups the input samples around the clusters. As an output, \f$\texttt{labels}_i\f$ contains a
0-based cluster index for the sample stored in the \f$i^{th}\f$ row of the samples matrix.

@note
-   (Python) An example on K-means clustering can be found at
    opencv_source_code/samples/python/kmeans.py
@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
Examples of this array can be:
-   Mat points(count, 2, CV_32F);
-   Mat points(count, 1, CV_32FC2);
-   Mat points(1, count, CV_32FC2);
-   std::vector\<cv::Point2f\> points(sampleCount);
@param K Number of clusters to split the set by.
@param bestLabels Input/output integer array that stores the cluster indices for every sample.
@param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or
the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster
centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
@param attempts Flag to specify the number of times the algorithm is executed using different
initial labellings. The algorithm returns the labels that yield the best compactness (see the last
function parameter).
@param flags Flag that can take values of cv::KmeansFlags
@param centers Output matrix of the cluster centers, one row per each cluster center.
@return The function returns the compactness measure that is computed as
\f[\sum _i  \| \texttt{samples} _i -  \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
after every attempt. The best (minimum) value is chosen and the corresponding labels and the
compactness value are returned by the function. Basically, you can use only the core of the
function, set the number of attempts to 1, initialize labels each time using a custom algorithm,
pass them with the ( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best
(most-compact) clustering.
*/
CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,
                            TermCriteria criteria, int attempts,
                            int flags, OutputArray centers = noArray() );
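As the note at the end of the doc suggests, kmeans can also be seeded with custom initial labels and attempts set to 1; a hedged sketch (reusing points, clusterCount and centers from kmeans_test; the round-robin seeding is purely illustrative):

// Seed kmeans with caller-supplied initial labels instead of random / kmeans++ init.
Mat initLabels(points.rows, 1, CV_32S);
for (int i = 0; i < points.rows; i++)
    initLabels.at<int>(i) = i % clusterCount;   // illustrative seeding

double compactness2 = kmeans(points, clusterCount, initLabels,
                             TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
                             1, KMEANS_USE_INITIAL_LABELS, centers);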


2.3 PCA

Using PCA for dimensionality reduction. The helper DoPca below computes it by hand, following the steps sketched next, for comparison against cv::PCA.
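For reference, the hand-rolled recipe (notation mine): with the data $X \in \mathbb{R}^{R \times C}$ stored one sample per row,

$$m_j = \frac{1}{R}\sum_{i=1}^{R} X_{ij}, \qquad S_{ij} = X_{ij} - m_j, \qquad \Sigma = \frac{1}{R} S^{\top} S, \qquad \Sigma v_k = \lambda_k v_k,$$

and the projection onto the retained components is $Y = S V^{\top}$, where the rows of $V$ are the kept eigenvectors (this is also what pca.project computes).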

 void DoPca(const Mat &_data, int dim, Mat &eigenvalues, Mat &eigenvectors)
   {
       assert( dim>0 );
       Mat data =  cv::Mat_<double>(_data);

       int R = data.rows;
       int C = data.cols;

       if ( dim>C )
           dim = C;

       // compute the column means
       Mat m = Mat::zeros( 1, C, data.type() );

       for ( int j=0; j<C; j++ )
       {
           for ( int i=0; i<R; i++ )
           {
               m.at<double>(0,j) += data.at<double>(i,j);
           }
       }

       m = m/R;
       // the mean of each of the 6 columns is stored in m: [1.67, 2.01, 1.67, 2.01, 1.67, 2.01]


       // center the data, then compute the covariance matrix
       Mat S =  Mat::zeros( R, C, data.type() );
       for ( int i=0; i<R; i++ )
       {
           for ( int j=0; j<C; j++ )
           {
           S.at<double>(i,j) = data.at<double>(i,j) - m.at<double>(0,j); // subtract the column mean from each entry
           }
       }

       Mat Average = S.t() * S /(R);
       // covariance matrix: (S transposed * S) / number of rows


       // use OpenCV's eigen() to compute the eigenvalues and eigenvectors
       eigen(Average, eigenvalues, eigenvectors);
       cout << "===================================" << endl;
       cout << Average * (eigenvectors.row(0).t()) << endl;
       cout << eigenvalues.at<float>(0,0) * (eigenvectors.row(0).t()) << endl;
   }

   void pca_test()
   {
       float A[ 60 ]={
           1.5 , 2.3 , 1.5 , 2.3 , 1.5 , 2.3 ,
           3.0 , 1.7 , 3.0 , 1.7 , 3.0 , 1.7 ,
           1.2 , 2.9 , 1.2 , 2.9 , 1.2 , 2.9 ,
           2.1 , 2.2 , 2.1 , 2.2 , 2.1 , 2.2 ,
           3.1 , 3.1 , 3.1 , 3.1 , 3.1 , 3.1 ,
           1.3 , 2.7 , 1.3 , 2.7 , 1.3 , 2.7 ,
           2.0 , 1.7 , 2.0 , 1.7 , 2.0 , 1.7 ,
           1.0 , 2.0 , 1.0 , 2.0 , 1.0 , 2.0 ,
           0.5 , 0.6 , 0.5 , 0.6 , 0.5 , 0.6 ,
           1.0 , 0.9 , 1.0 , 0.9 , 1.0 , 0.9 };

       Mat DataMat = Mat::zeros( 10, 6, CV_32FC1 );

       // copy array A into the DataMat matrix
       for ( int i=0; i<10; i++ )
       {
           for ( int j=0; j<6; j++ )
           {
               DataMat.at<float>(i, j) = A[i * 6 + j];
           }
       }
       cout << "A = \n" << DataMat << endl;
       // OPENCV PCA
       /*
        * PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0);// maxComponents: number of principal components to keep
        * PCA(InputArray data, InputArray mean, int flags, double retainedVariance);// retainedVariance: fraction of variance to retain
       */
       
       PCA pca(DataMat, noArray(), PCA::DATA_AS_ROW, 0.8);// retain 80% of the variance

       cout << "(pca)eigenvalues =  \n " << pca.eigenvalues << endl;
       cout << "(pca)eigenvectors = \n" << pca.eigenvectors << endl;
       cout << "(pca)means = \n" << pca.mean << endl;
		// compute the eigenvalues and eigenvectors by hand for comparison
       Mat eigenvalues;// eigenvalues
       Mat eigenvectors;// eigenvectors

       DoPca(DataMat, 3, eigenvalues, eigenvectors);

       cout << "(my)eigenvalues =  \n " << eigenvalues << endl;
       cout << "(my)eigenvectors = \n" << eigenvectors << endl;


       Mat means = repeat(pca.mean,DataMat.rows/pca.mean.rows,DataMat.cols/pca.mean.cols);
       cout << "means: \n" << means << endl;
       //DataMat -= means; // NOTE: mean subtraction is disabled here, so myProMat below
                           // differs from pca.project() by a constant offset (the projected mean)
       Mat myProMat = DataMat * pca.eigenvectors.t();
       cout << "(mean-removed) A = \n" << DataMat << endl;
       cout << "(my) projected: \n" << myProMat << endl;
       Mat projectMat = pca.project(DataMat);
       //divide(I1,I2,dst,scale,int dtype=-1);//dst=saturate_cast(I1*scale/I2)
       //Mat U;//= projectMat / DataMat;
       //divide(projectMat,DataMat,U,-1);

       //cout << "投影矩阵: \n" << U << endl;
       cout << "投影后: \n" << projectMat << endl;
       cout << "type: " << projectMat.type() << endl;
       Mat backproMat = pca.backProject(projectMat);
       cout << "逆投影: \n" << backproMat << endl;
   }
A = 
[1.5, 2.3, 1.5, 2.3, 1.5, 2.3;
 3, 1.7, 3, 1.7, 3, 1.7;
 1.2, 2.9000001, 1.2, 2.9000001, 1.2, 2.9000001;
 2.0999999, 2.2, 2.0999999, 2.2, 2.0999999, 2.2;
 3.0999999, 3.0999999, 3.0999999, 3.0999999, 3.0999999, 3.0999999;
 1.3, 2.7, 1.3, 2.7, 1.3, 2.7;
 2, 1.7, 2, 1.7, 2, 1.7;
 1, 2, 1, 2, 1, 2;
 0.5, 0.60000002, 0.5, 0.60000002, 0.5, 0.60000002;
 1, 0.89999998, 1, 0.89999998, 1, 0.89999998]
(pca)eigenvalues =  
 [2.7613358;
 1.0636643]
(pca)eigenvectors = 
[0.4352051, 0.37938085, 0.43520534, 0.3793807, 0.43520522, 0.37938064;
 -0.37938061, 0.43520552, -0.37938067, 0.43520534, -0.37938088, 0.43520504]
(pca)means = 
[1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01]
===================================
[1.201748061921488;
 1.047597284962308;
 1.201748061921488;
 1.047597284962308;
 1.201748061921488;
 1.047597284962308]
[1.201748061921488;
 1.047597284962308;
 1.201748061921488;
 1.047597284962308;
 1.201748061921488;
 1.047597284962308]
(my)eigenvalues =  
 [2.761335804891784;
 1.063664069223222;
 1.620076219567892e-16;
 -1.041696932190316e-33;
 -8.694544730646442e-17;
 -5.303045989490724e-16]
(my)eigenvectors = 
[0.4352053306202595, 0.3793806182886049, 0.4352053306202594, 0.3793806182886051, 0.4352053306202594, 0.3793806182886051;
 0.3793806182886052, -0.4352053306202586, 0.3793806182886049, -0.4352053306202596, 0.379380618288605, -0.4352053306202596;
 0, 0.8157956251356043, -0.02929374518914471, -0.4078978125678017, 0.0292937451891439, -0.4078978125678018;
 0, -3.228851907363228e-17, -5.992184024091453e-17, -0.7071067811865475, 7.885603783723002e-17, 0.7071067811865475;
 0, 0.03382550334104961, 0.70649973566364, -0.01691275167052486, -0.70649973566364, -0.01691275167052469;
 -0.8164965809277263, 1.665334536937735e-16, 0.4082482904638632, -1.665334536937735e-16, 0.4082482904638632, -1.665334536937735e-16]
means: 
[1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01;
 1.6700001, 2.01, 1.6700001, 2.01, 1.6700001, 2.01]
(mean-removed) A = 
[1.5, 2.3, 1.5, 2.3, 1.5, 2.3;
 3, 1.7, 3, 1.7, 3, 1.7;
 1.2, 2.9000001, 1.2, 2.9000001, 1.2, 2.9000001;
 2.0999999, 2.2, 2.0999999, 2.2, 2.0999999, 2.2;
 3.0999999, 3.0999999, 3.0999999, 3.0999999, 3.0999999, 3.0999999;
 1.3, 2.7, 1.3, 2.7, 1.3, 2.7;
 2, 1.7, 2, 1.7, 2, 1.7;
 1, 2, 1, 2, 1, 2;
 0.5, 0.60000002, 0.5, 0.60000002, 0.5, 0.60000002;
 1, 0.89999998, 1, 0.89999998, 1, 0.89999998]
(my) projected: 
[4.5761504, 1.2957033;
 5.8516889, -1.1948794;
 4.8673515, 2.4205155;
 5.2457056, 0.48225659;
 7.5756493, 0.51916856;
 4.7702842, 2.0455782;
 4.546073, -0.05673724;
 3.5819001, 1.4730897;
 1.3356931, 0.21429849;
 2.3299437, 0.036912113]
projected: 
[0.10810643, 0.5721128;
 1.3836447, -1.9184699;
 0.39930728, 1.6969252;
 0.77766162, -0.24133384;
 3.107605, -0.20442188;
 0.30224022, 1.3219877;
 0.078029051, -0.78032768;
 -0.88614398, 0.7494992;
 -3.1323509, -0.50929195;
 -2.1381004, -0.68667835]
type: 5
back-projected: 
[1.5, 2.3000002, 1.5, 2.3, 1.4999999, 2.3;
 2.9999995, 1.6999996, 3, 1.6999997, 3.0000002, 1.7000003;
 1.2000002, 2.9000008, 1.2000002, 2.9000003, 1.1999997, 2.8999999;
 2.0999997, 2.2, 2.0999999, 2.2, 2.0999999, 2.2;
 3.0999994, 3.1000004, 3.1000001, 3.0999999, 3.0999997, 3.0999997;
 1.3000001, 2.7000005, 1.3000001, 2.7000003, 1.2999997, 2.6999998;
 1.9999999, 1.6999998, 2, 1.6999999, 2.0000002, 1.7000002;
 1.0000002, 2.0000002, 1, 2, 0.99999994, 2;
 0.50000048, 0.59999937, 0.49999976, 0.5999999, 0.50000024, 0.60000026;
 1.0000004, 0.89999944, 0.99999988, 0.89999986, 1.0000002, 0.90000021]
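
A quick, hedged way to quantify how lossy the 2-component reconstruction is (reusing DataMat and backproMat from pca_test):

// L2 distance between the original data and its PCA reconstruction.
double err = norm(DataMat, backproMat, NORM_L2);
cout << "reconstruction error = " << err << endl;
// Near zero here: the 6 columns are 3 copies of 2 columns, so the data has effective
// rank 2 and the two retained components capture it almost exactly.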
    /** @overload
    @param data input samples stored as matrix rows or matrix columns.
    @param mean optional mean value; if the matrix is empty (@c noArray()),
    the mean is computed from the data.
    @param flags operation flags; currently the parameter is only used to
    specify the data layout (PCA::Flags)
    @param maxComponents maximum number of components that %PCA should
    retain; by default, all the components are retained.
    */
    PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0);

    /** @overload
    @param data input samples stored as matrix rows or matrix columns.
    @param mean optional mean value; if the matrix is empty (noArray()),
    the mean is computed from the data.
    @param flags operation flags; currently the parameter is only used to
    specify the data layout (PCA::Flags)
    @param retainedVariance Percentage of variance that PCA should retain.
    Using this parameter will let the PCA decided how many components to
    retain but it will always keep at least 2.
    */
    PCA(InputArray data, InputArray mean, int flags, double retainedVariance);
    /** @brief Projects vector(s) to the principal component subspace.

    The methods project one or more vectors to the principal component
    subspace, where each vector projection is represented by coefficients in
    the principal component basis. The first form of the method returns the
    matrix that the second form writes to the result. So the first form can
    be used as a part of expression while the second form can be more
    efficient in a processing loop.
    @param vec input vector(s); must have the same dimensionality and the
    same layout as the input data used at %PCA phase, that is, if
    DATA_AS_ROW are specified, then `vec.cols==data.cols`
    (vector dimensionality) and `vec.rows` is the number of vectors to
    project, and the same is true for the PCA::DATA_AS_COL case.
    */
    Mat project(InputArray vec) const;

    /** @overload
    @param vec input vector(s); must have the same dimensionality and the
    same layout as the input data used at PCA phase, that is, if
    DATA_AS_ROW are specified, then `vec.cols==data.cols`
    (vector dimensionality) and `vec.rows` is the number of vectors to
    project, and the same is true for the PCA::DATA_AS_COL case.
    @param result output vectors; in case of PCA::DATA_AS_COL, the
    output matrix has as many columns as the number of input vectors, this
    means that `result.cols==vec.cols` and the number of rows match the
    number of principal components (for example, `maxComponents` parameter
    passed to the constructor).
     */
    void project(InputArray vec, OutputArray result) const;

    /** @brief Reconstructs vectors from their PC projections.

    The methods are inverse operations to PCA::project. They take PC
    coordinates of projected vectors and reconstruct the original vectors.
    Unless all the principal components have been retained, the
    reconstructed vectors are different from the originals. But typically,
    the difference is small if the number of components is large enough (but
    still much smaller than the original vector dimensionality). As a
    result, PCA is used.
    @param vec coordinates of the vectors in the principal component
    subspace, the layout and size are the same as of PCA::project output
    vectors.
     */
    Mat backProject(InputArray vec) const;

    /** @overload
    @param vec coordinates of the vectors in the principal component
    subspace, the layout and size are the same as of PCA::project output
    vectors.
    @param result reconstructed vectors; the layout and size are the same as
    of PCA::project input vectors.
     */
    void backProject(InputArray vec, OutputArray result) const;
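
Finally, a minimal round-trip sketch for a single new sample through the fitted PCA (assuming the pca object from pca_test above):

Mat sample = (Mat_<float>(1, 6) << 1.5f, 2.3f, 1.5f, 2.3f, 1.5f, 2.3f);
Mat coeffs = pca.project(sample);       // 1 x (number of retained components)
Mat approx = pca.backProject(coeffs);   // 1 x 6 reconstruction of the sample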

Reposted from blog.csdn.net/qq_33993729/article/details/106723366