Robot Vision Project: Visual Detection and Recognition + Robot Following (6)

In the OpenCV pedestrian-detection demo, get_support_vector_count() kept returning 1 — that is, the reported number of support vectors was always 1. As a result, in
        for(int i = 0; i < supportVectorNum; i++)
        {
            const float * pSVData = svm.get_support_vector(i); // pointer to the i-th support vector
            cout << "support vector: " << *pSVData << endl;
            for(int j = 0; j < DescriptorDim; j++)
            {
                // could this be reading through an invalid pointer?
                supportVectorMat.at<float>(i,j) = pSVData[j];
            }
        }

        double * pAlphaData = svm.get_alpha_vector();
        for(int i = 0; i < supportVectorNum; i++)
        {
            alphaMat.at<float>(0,i) = pAlphaData[i];
        }
this code clearly ends up with an out-of-bounds array access, which explains the segmentation fault.

    int supportVectorNum = svm.get_support_vector_count(); // number of support vectors

This line shows that the support-vector count comes from this function, and the function takes no arguments — so what actually determines the number of support vectors? People have asked this online, but I haven't found a useful answer, which is a bit of a headache.
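As for what determines that count: it is fixed when the SVM is trained (by the training set and parameters such as C and the kernel) and then serialized into the model file, so get_support_vector_count() after load() simply reads back whatever the XML contains. If the number looks wrong, the model file itself is the thing to inspect. A cheap way to narrow down the segfault is to validate everything the copy loops depend on before copying. A minimal sketch (checkSvmModel is my own name, not part of the demo, and it assumes the MySVM subclass from the listing below):

#include <iostream>
#include <opencv2/ml/ml.hpp>
using namespace std;

bool checkSvmModel(MySVM &svm)
{
    int dim = svm.get_var_count();                // HOG descriptor dimension
    int svNum = svm.get_support_vector_count();   // fixed at training time, stored in the XML
    if (dim <= 0 || svNum <= 0) {
        cerr << "model not loaded: dim=" << dim << ", svNum=" << svNum << endl;
        return false;
    }
    for (int i = 0; i < svNum; i++) {
        // get_support_vector(i) points into the model's SV storage;
        // a NULL here would explain the crash the comments worry about
        if (svm.get_support_vector(i) == NULL) {
            cerr << "support vector " << i << " is NULL" << endl;
            return false;
        }
    }
    if (svm.get_alpha_vector() == NULL) {         // decision_func->alpha
        cerr << "alpha array is NULL" << endl;
        return false;
    }
    return true;
}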

runtracker.cpp

#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <dirent.h>
#include <math.h>
#include <ros/ros.h>
#include <ros/package.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include "geometry_msgs/Twist.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "kcftracker.hpp"
#include <opencv2/ml/ml.hpp>

using namespace cv;
using namespace std;

static const std::string RGB_WINDOW = "RGB Image window";
//static const std::string DEPTH_WINDOW = "DEPTH Image window";

#define Max_linear_speed 1
#define Min_linear_speed 0.4
#define Min_distance 1.0
#define Max_distance 5.0
#define Max_rotation_speed 0.75

float linear_speed = 0;
float rotation_speed = 0;

// linear fit mapping distance to speed: speed = k * distance + h, so that
// Min_distance maps to Min_linear_speed and Max_distance to Max_linear_speed
// (k = (1 - 0.4) / (5 - 1) = 0.15, h = 0.4 - 0.15 * 1 = 0.25)
float k_linear_speed = (Max_linear_speed - Min_linear_speed) / (Max_distance - Min_distance);
float h_linear_speed = Min_linear_speed - k_linear_speed * Min_distance;

float k_rotation_speed = 0.004;
float h_rotation_speed_left = 1.2;
float h_rotation_speed_right = 1.36;
float distance_scale = 1.0;

// pixel-column boundaries that split the image into rotation zones
int ERROR_OFFSET_X_left1 = 100;
int ERROR_OFFSET_X_left2 = 300;
int ERROR_OFFSET_X_right1 = 340;
int ERROR_OFFSET_X_right2 = 600;

int roi_height = 100;
int roi_width = 100;

cv::Mat rgbimage;
cv::Mat depthimage;
cv::Rect selectRect;
cv::Point origin;
cv::Rect result;

bool select_flag = false;
bool bRenewROI = false;  // the flag to enable the implementation of KCF algorithm for the new chosen ROI
bool bBeginKCF = false;
bool enable_get_depth = false;

bool HOG = true;
bool FIXEDWINDOW = false;
bool MULTISCALE = true;
bool SILENT = true;
bool LAB = false;

int DescriptorDim;
bool has_dectect_people;

// Create KCFTracker object
KCFTracker tracker(HOG, FIXEDWINDOW, MULTISCALE, LAB);

vector<float> myDetector;
float dist_val[5];

/*
void onMouse(int event, int x, int y, int, void*)
{
    if (select_flag)
    {
        selectRect.x = MIN(origin.x, x);
        selectRect.y = MIN(origin.y, y);
        selectRect.width = abs(x - origin.x);
        selectRect.height = abs(y - origin.y);
        selectRect &= cv::Rect(0, 0, rgbimage.cols, rgbimage.rows);
    }
    if (event == CV_EVENT_LBUTTONDOWN)
    {
        bBeginKCF = false;
        select_flag = true;
        origin = cv::Point(x, y);
        selectRect = cv::Rect(x, y, 0, 0);
    }
    else if (event == CV_EVENT_LBUTTONUP)
    {
        select_flag = false;
        bRenewROI = true;
    }
}
*/


class MySVM : public CvSVM
{
public:
    // expose the alpha array of the SVM decision function
    double * get_alpha_vector()
    {
        return this->decision_func->alpha;
    }

    // expose the rho parameter of the SVM decision function, i.e. the bias
    float get_rho()
    {
        return this->decision_func->rho;
    }
};
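For context: CvSVM keeps its decision function in a protected member (decision_func), so subclassing like this is the usual workaround for reading the alpha weights and rho bias back out of a trained model; the accessors above add no logic of their own.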


class ImageConverter
{
    ros::NodeHandle nh_;
    image_transport::ImageTransport it_;
    image_transport::Subscriber image_sub_;
    image_transport::Subscriber depth_sub_;
    HOGDescriptor hog;

public:
    ros::Publisher pub;

    ImageConverter()
        : it_(nh_)
    {
        // Subscribe to the input video feeds and advertise the velocity command topic
        /*
        image_sub_ = it_.subscribe("/camera/rgb/image_rect_color", 1, &ImageConverter::imageCb, this);
        depth_sub_ = it_.subscribe("/camera/depth/image", 1, &ImageConverter::depthCb, this);
        */
        image_sub_ = it_.subscribe("/kinect/rgb/image_color", 1, &ImageConverter::imageCb, this);
        depth_sub_ = it_.subscribe("/kinect/depth/image", 1, &ImageConverter::depthCb, this);
        pub = nh_.advertise<geometry_msgs::Twist>("/cmd_vel", 1000);

        preparePeopleDetect();
        printf("\n00\n");
        cv::namedWindow(RGB_WINDOW);
        printf("\n11\n");
        //cv::namedWindow(DEPTH_WINDOW);
    }

    ~ImageConverter()
    {
        cv::destroyWindow(RGB_WINDOW);
        //cv::destroyWindow(DEPTH_WINDOW);
    }

    void imageCb(const sensor_msgs::ImageConstPtr& msg)
    {
        printf("imagecb");
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
        cv_ptr->image.copyTo(rgbimage);

        peopleDetect();
        if(has_dectect_people && !select_flag)
        {
            printf("has_dectect_people = true\n");
            selectRect &= cv::Rect(0, 0, rgbimage.cols, rgbimage.rows);
            bRenewROI = true;
            select_flag = true;
        }

        //cv::setMouseCallback(RGB_WINDOW, onMouse, 0);
        if(bRenewROI)
        {
            // if (selectRect.width <= 0 || selectRect.height <= 0)
            // {
            //     bRenewROI = false;
            //     //continue;
            // }
            tracker.init(selectRect, rgbimage); // init tracker
            bBeginKCF = true;
            bRenewROI = false;
            enable_get_depth = false;
        }

        if(bBeginKCF)
        {
            result = tracker.update(rgbimage); // update tracker
            cv::rectangle(rgbimage, result, cv::Scalar(0, 255, 0), 1, 8, 0);
            enable_get_depth = true;
        }
        else
            cv::rectangle(rgbimage, selectRect, cv::Scalar(0, 255, 0), 2, 8, 0);

        cv::imshow(RGB_WINDOW, rgbimage);
        cv::waitKey(1);
    }

    void preparePeopleDetect()
    {
        has_dectect_people = false;
        //hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
        MySVM svm;
        string path = ros::package::getPath("track_pkg") + "/src/12000neg_2400pos.xml";
        printf("path = %s", path.c_str());
        //svm.load("/home/server/catkin_ws/src/tracker_kcf_ros/src/track_pkg/src/12000neg_2400pos.xml");
        svm.load(path.c_str());
        //svm.load("/home/xiaoqiang/catkin_ws/src/track_pkg/src/12000neg_2400pos.xml");

        DescriptorDim = svm.get_var_count();                    // feature dimension, i.e. the HOG descriptor length
        int supportVectorNum = svm.get_support_vector_count();  // number of support vectors, read back from the XML
        cout << endl;
        cout << "number of support vectors: " << supportVectorNum << endl;
        cout << "descriptor dimension: " << DescriptorDim << endl;

        Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);                      // alpha matrix
        Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1);  // support vector matrix
        Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1);                        // product of the alpha and support vector matrices


        // this is where the problem shows up
        for(int i = 0; i < supportVectorNum; i++)
        {
            const float * pSVData = svm.get_support_vector(i); // pointer to the i-th support vector
            cout << "support vector: " << *pSVData << endl;
            for(int j = 0; j < DescriptorDim; j++)
            {
                // could this be reading through an invalid pointer?
                supportVectorMat.at<float>(i,j) = pSVData[j];
            }
        }

        double * pAlphaData = svm.get_alpha_vector();
        for(int i = 0; i < supportVectorNum; i++)
        {
            alphaMat.at<float>(0,i) = pAlphaData[i];
        }
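        // Annotation (mine, not in the original post): a trained linear SVM
        // classifies with f(x) = sum_i alpha_i * (sv_i . x) - rho. The lines
        // below collapse the support vectors into a single weight vector,
        // w = -alphaMat * supportVectorMat (hence the factor of -1), and
        // HOGDescriptor::setSVMDetector() then takes w with rho appended as
        // its final bias element -- which is why myDetector ends up with
        // DescriptorDim + 1 entries.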

    

        //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);
        resultMat = -1 * alphaMat * supportVectorMat;

        // copy the weight vector into the detector, then append rho as the bias term
        for(int i = 0; i < DescriptorDim; i++)
        {
            myDetector.push_back(resultMat.at<float>(0,i));
        }
        myDetector.push_back(svm.get_rho());
        cout << "detector dimension = " << myDetector.size() << endl;
        hog.setSVMDetector(myDetector);

        // dump the detector to a text file so it can be reused without the XML model
        ofstream fout("HOGDetectorForOpenCV.txt");
        for(unsigned int i = 0; i < myDetector.size(); i++)
        {
            fout << myDetector[i] << endl;
        }
        printf("Start the tracking process");
    }

    

    void peopleDetect()
    {
        if(has_dectect_people)
            return;

        vector<Rect> found, found_filtered;
        double t = (double)getTickCount();
        // args: hit threshold 0, win stride 8x8, padding 32x32, scale step 1.05, group threshold 2
        hog.detectMultiScale(rgbimage, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        t = (double)getTickCount() - t;
        printf("\tdetection time = %gms\n", t*1000./cv::getTickFrequency());

        size_t i, j;
        cout << "number of rectangles found: " << found.size() << endl;

        // keep every rectangle r that is not nested inside another one;
        // for nested rectangles, only the outermost goes into found_filtered
        for( i = 0; i < found.size(); i++ )
        {
            Rect r = found[i];
            for( j = 0; j < found.size(); j++ )
                if( j != i && (r & found[j]) == r ) // r lies entirely inside found[j]
                    break;
            if( j == found.size() )
                found_filtered.push_back(r);
        }

        Rect r;
        // draw the boxes; the HOG detector returns slightly larger rectangles
        // than the real objects, so shrink them a bit for a nicer output
        for( i = 0; i < found_filtered.size(); i++ )
        {
            r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.07);
            r.height = cvRound(r.height*0.8);
            rectangle(rgbimage, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
            //printf("r.x==%d,y==%d,width==%d,height==%d\n",r.x,r.y,r.width,r.height);
        }

        // take the last sufficiently large box as the person and centre a fixed-size ROI on it
        if(r.width > 100 && r.height > 350){
            has_dectect_people = true;
            selectRect.x = r.x + (r.width - roi_width)/2;
            selectRect.y = r.y + (r.height - roi_height)/2;
            selectRect.width = roi_width;
            selectRect.height = roi_height;
            printf("selectRect.x==%d,y==%d,width==%d,height==%d\n", selectRect.x,selectRect.y,selectRect.width,selectRect.height);
        }
        //imshow(RGB_WINDOW, rgbimage);
    }

    void depthCb(const sensor_msgs::ImageConstPtr& msg)
    {
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1);
            cv_ptr->image.copyTo(depthimage);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("Could not convert from '%s' to 'TYPE_32FC1'.", msg->encoding.c_str());
        }

        if(enable_get_depth)
        {
            // sample the depth at five points spread over the tracked box
            dist_val[0] = depthimage.at<float>(result.y + result.height/3,   result.x + result.width/3);
            dist_val[1] = depthimage.at<float>(result.y + result.height/3,   result.x + 2*result.width/3);
            dist_val[2] = depthimage.at<float>(result.y + 2*result.height/3, result.x + result.width/3);
            dist_val[3] = depthimage.at<float>(result.y + 2*result.height/3, result.x + 2*result.width/3);
            dist_val[4] = depthimage.at<float>(result.y + result.height/2,   result.x + result.width/2);

            // average the samples that fall inside the sensor's usable range (0.4m to 10m)
            float distance = 0;
            int num_depth_points = 5;
            for(int i = 0; i < 5; i++)
            {
                if(dist_val[i] > 0.4 && dist_val[i] < 10.0)
                    distance += dist_val[i];
                else
                    num_depth_points--;
            }
            if(num_depth_points > 0) // guard added: avoid dividing by zero when every sample is invalid
                distance /= num_depth_points;

            //calculate linear speed
            if(distance > Min_distance)
                linear_speed = distance * k_linear_speed + h_linear_speed;
            else if(distance <= Min_distance - 0.5){
                // too close: back up at the speed the linear fit gives at the inner boundary
                linear_speed = -1 * ((Min_distance - 0.5) * k_linear_speed + h_linear_speed);
            }else{
                linear_speed = 0;
            }
            // note this clamp drops the sign, though the backward speed above never exceeds the cap
            if(fabs(linear_speed) > Max_linear_speed)
                linear_speed = Max_linear_speed;

            //calculate rotation speed
            int center_x = result.x + result.width/2;
            if(center_x < ERROR_OFFSET_X_left1){
                // target far to the left: rotate and drop back into detection mode
                printf("center_x <<<<<<<< ERROR_OFFSET_X_left1\n");
                rotation_speed = Max_rotation_speed/5;
                has_dectect_people = false;
                enable_get_depth = false;
                select_flag = false;
                bBeginKCF = false;
            }
            else if(center_x > ERROR_OFFSET_X_left1 && center_x < ERROR_OFFSET_X_left2)
                rotation_speed = -k_rotation_speed * center_x + h_rotation_speed_left;
            else if(center_x > ERROR_OFFSET_X_right1 && center_x < ERROR_OFFSET_X_right2)
                rotation_speed = -k_rotation_speed * center_x + h_rotation_speed_right;
            else if(center_x > ERROR_OFFSET_X_right2){
                // target far to the right: rotate the other way and re-detect
                printf("center_x >>>>>>>> ERROR_OFFSET_X_right2\n");
                rotation_speed = -Max_rotation_speed/5;
                has_dectect_people = false;
                enable_get_depth = false;
                select_flag = false;
                bBeginKCF = false;
            }
            else
                rotation_speed = 0;

            std::cout << "linear_speed = " << linear_speed << "  rotation_speed = " << rotation_speed << std::endl;
            std::cout << dist_val[0] << " / " << dist_val[1] << " / " << dist_val[2] << " / " << dist_val[3] << " / " << dist_val[4] << std::endl;
            std::cout << "distance = " << distance << std::endl;
        }

        //cv::imshow(DEPTH_WINDOW, depthimage);
        cv::waitKey(1);
    }
};

int main(int argc, char** argv)
{
    ros::init(argc, argv, "kcf_tracker");
    ImageConverter ic;
    printf("dd\n");
    printf("hehe  ");

    while(ros::ok())
    {
        printf("haha   ");
        ros::spinOnce();

        // publish the latest speed commands computed in the callbacks
        geometry_msgs::Twist twist;
        twist.linear.x = linear_speed;
        twist.linear.y = 0;
        twist.linear.z = 0;
        twist.angular.x = 0;
        twist.angular.y = 0;
        twist.angular.z = rotation_speed;
        ic.pub.publish(twist);

        if (cvWaitKey(33) == 'q')
            break;
    }
    return 0;
}
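A closing design note, not from the original post: cvWaitKey(33) is doing double duty in that loop as keyboard poll and loop pacing, and it only behaves while a HighGUI window is up. The more conventional way to pace a ROS publishing loop is ros::Rate; a minimal sketch, reusing the names from the listing above:

    ros::Rate loop_rate(30);        // aim for roughly 30 iterations per second
    while (ros::ok())
    {
        ros::spinOnce();            // service the image/depth callbacks

        geometry_msgs::Twist twist; // unset fields default to zero
        twist.linear.x = linear_speed;
        twist.angular.z = rotation_speed;
        ic.pub.publish(twist);

        loop_rate.sleep();          // sleep away the rest of the cycle
    }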


Reposted from blog.csdn.net/Synioe/article/details/82793288