Xtark robot car: line-following code and small changes for traffic-light recognition

Changes made:

        Only xtark_follow_line.py was modified.

        Added an extra HSV segmentation pass for traffic-light recognition (at demo time it is recommended to show a red or green image on a phone or tablet); a minimal sketch of this check is given right after this list.

        Changed how the road position is detected: instead of the centroid of the mask, the road pixel closest to the center of the image is used.

        When no road is detected, a band higher up in the image is searched as well, to handle gaps (broken lines) in the track.

        Modified the speed-handling function so that the car runs more smoothly.
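
As a quick reference, below is a minimal standalone sketch of the red/green check that the modified callback() performs on every frame (the helper name update_traffic_light is only for illustration; the real node does this inline). The HSV ranges are the ones hard-coded in the modified code and were tuned for one specific camera and screen; note that the node converts its BGR frame with cv2.COLOR_RGB2HSV, so "red" lands around hue 100-130 rather than near the usual 0/180. Treat the numbers only as a starting point.

import cv2
import numpy

RED_LOWER, RED_UPPER = numpy.array([100, 170, 120]), numpy.array([130, 240, 240])
GREEN_LOWER, GREEN_UPPER = numpy.array([65, 220, 150]), numpy.array([80, 230, 220])
KERNEL = numpy.ones((5, 5), numpy.uint8)

def update_traffic_light(hsv_image, red_light):
    # Returns the new red_light state for one frame:
    # not stopped yet  -> stop as soon as any red pixel survives an erosion;
    # already stopped  -> start again once a green pixel is seen.
    if not red_light:
        mask = cv2.erode(cv2.inRange(hsv_image, RED_LOWER, RED_UPPER), KERNEL, iterations=1)
        return numpy.amax(mask) > 0
    mask = cv2.erode(cv2.inRange(hsv_image, GREEN_LOWER, GREEN_UPPER), KERNEL, iterations=1)
    return numpy.amax(mask) == 0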

Defense PPT download: it is fairly concise; opening it with the native PowerPoint is strongly recommended (because smooth/Morph transitions are used).

It also contains a short demo video (the robot-car part).

Original code before the changes:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy
from geometry_msgs.msg import Twist

# Color matrix: HSV ranges tested so far
color_arry = [[100,55,90,130,215,170], # red
              [0,220,160,20,255,220],# blue
              [20,155,45,60,195,85], # green
              [75,195,195,115,235,255], # yellow
              [0,0,0,150,40,46]]    # black
# Error values recorded while following the line color
diff= 0
d_diff= 0
last_diff= 0

def line_follow_init():    
    #define topic publisher and subscriber
    global color_mode, bridge, image_sub, mask_pub, result_pub, pub_cmd
    bridge = CvBridge()
    image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, callback)
    mask_pub = rospy.Publisher("/mask_image", Image, queue_size=1)
    result_pub = rospy.Publisher("/line_image", Image, queue_size=1)
    pub_cmd = rospy.Publisher('cmd_vel', Twist, queue_size=5)

    color_mode = int(rospy.get_param('~color_mode'))
    rospy.loginfo('color_mode:%d', color_mode)


def callback(data):
    global raw_image, mask_image
    # convert the ROS image message to an OpenCV image (BGR)
    raw_image = bridge.imgmsg_to_cv2(data, "bgr8")
    raw_image = cv2.resize(raw_image, (320,240), interpolation=cv2.INTER_AREA)  # downscale to improve frame rate
    # convert the image to HSV (note: the frame is BGR but COLOR_RGB2HSV is used; the HSV thresholds above were tuned with this conversion)
    hsv_image = cv2.cvtColor(raw_image,cv2.COLOR_RGB2HSV)

    # morphological operation to clean up small noise (erode then dilate)
    # create a 5x5 kernel
    kernel = numpy.ones((5,5),numpy.uint8)
    # erode then dilate the image
    hsvimage_erode = cv2.erode(hsv_image,kernel,iterations=1)
    hsvimag_dilate = cv2.dilate(hsvimage_erode,kernel,iterations=1)
    # set the HSV threshold range for the selected color
    # h_low s_low v_low
    color_lower = numpy.array([color_arry[color_mode][0],color_arry[color_mode][1],color_arry[color_mode][2]])
    # h_upper s_upper v_upper
    color_upper = numpy.array([color_arry[color_mode][3],color_arry[color_mode][4],color_arry[color_mode][5]])
    # threshold to get the binary mask
    mask_image = cv2.inRange(hsvimag_dilate,color_lower,color_upper)
    masked = cv2.bitwise_and(raw_image, raw_image, mask=mask_image)
    # image processing function
    image_processing()


# Process the raw image to extract the region we need
def image_processing():
    res = raw_image
    h, w, d = res.shape
    search_top = h-20
    search_bot = h
    mask_image[0:search_top, 0:w] = 0
    mask_image[search_bot:h, 0:w] = 0
    # compute the moments of the binary mask to get its centroid (geometric center)
    P = cv2.moments(mask_image)
    # if pixels of the target color were found, compute the offset error
    if P['m00'] > 0:
        ix = int(P['m10']/P['m00'])
        iy = int(P['m01']/P['m00'])
        cv2.circle(res, (ix, iy), 10, (255, 0, 255), -1)
        if cv2.circle:
            diff= ix - w/2-15
            d_diff=diff-last_diff
            twist_calculate(diff,d_diff)
    else:
        twist = Twist()
        twist.linear.x = 0
        twist.linear.y = 0
        twist.linear.z = 0
        twist.angular.x = 0
        twist.angular.y = 0
        twist.angular.z = 0
        pub_cmd.publish(twist)

    # publish the annotated image and the binary mask on their topics
    img_msg = bridge.cv2_to_imgmsg(res, encoding="bgr8")
    img_msg.header.stamp = rospy.Time.now()
    result_pub.publish(img_msg)
    img_msg = bridge.cv2_to_imgmsg(mask_image, encoding="passthrough")
    img_msg.header.stamp = rospy.Time.now()
    mask_pub.publish(img_msg)



# Speed-handling function
def twist_calculate(twist_erro,twist_d_erro):
    twist = Twist()
    twist.linear.x = 0.2
    twist.linear.y = 0
    twist.linear.z = 0
    twist.angular.x = 0
    twist.angular.y = 0
    twist.angular.z = 0
    if twist_erro!=0:
        twist.angular.z = -float(twist_erro)*0.005-float(twist_d_erro)*0.000
    else:
        twist.angular.z = 0
    last_diff=diff
    pub_cmd.publish(twist)



if __name__ == '__main__':
    try:
        # init ROS node 
        rospy.init_node("line_follow")
        rospy.loginfo("Starting Line Follow node")
        line_follow_init()
        rospy.spin()
    except KeyboardInterrupt:
        print ("Shutting down cv_bridge_test node.")
        cv2.destroyAllWindows()

Modified code:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time

import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy
from geometry_msgs.msg import Twist

# Color matrix: HSV ranges tested so far
color_arry = [[100, 55, 90, 130, 215, 170],  # red
              [0, 220, 160, 20, 255, 220],  # blue
              [20, 155, 45, 60, 195, 85],  # green
              [75, 195, 195, 115, 235, 255],  # yellow
              [0, 0, 0, 150, 40, 46], ]  # black
# Error values recorded while following the line color
diff = 0
d_diff = 0
last_diff = 0
up_diff = 0
red_light = False  # True while a red light has been seen and we are waiting for green


def line_follow_init():
    # define topic publisher and subscriber
    global color_mode, bridge, image_sub, mask_pub, result_pub, pub_cmd
    bridge = CvBridge()
    image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, callback)
    mask_pub = rospy.Publisher("/mask_image", Image, queue_size=1)
    result_pub = rospy.Publisher("/line_image", Image, queue_size=1)
    pub_cmd = rospy.Publisher('cmd_vel', Twist, queue_size=5)

    color_mode = int(rospy.get_param('~color_mode'))
    rospy.loginfo('color_mode:%d', color_mode)


def callback(data):
    global raw_image, mask_image, red_light
    # convert the ROS image message to an OpenCV image (BGR)
    raw_image = bridge.imgmsg_to_cv2(data, "bgr8")
    raw_image = cv2.resize(raw_image, (320, 240), interpolation=cv2.INTER_AREA)  # downscale to improve frame rate
    # convert the image to HSV (note: the frame is BGR but COLOR_RGB2HSV is used; the HSV thresholds were tuned with this conversion)
    hsv_image = cv2.cvtColor(raw_image, cv2.COLOR_RGB2HSV)

    # morphological operation to clean up small noise (erode then dilate)
    # create a 5x5 kernel
    kernel = numpy.ones((5, 5), numpy.uint8)
    # erode then dilate the full HSV image (disabled in this version)
    # hsvimage_erode = cv2.erode(hsv_image, kernel, iterations=1)
    # hsvimag_dilate = cv2.dilate(hsvimage_erode, kernel, iterations=1)
    # set the HSV threshold range for the line color
    # h_low s_low v_low
    color_lower = numpy.array([color_arry[color_mode][0], color_arry[color_mode][1], color_arry[color_mode][2]])
    # h_upper s_upper v_upper
    color_upper = numpy.array([color_arry[color_mode][3], color_arry[color_mode][4], color_arry[color_mode][5]])
    # threshold to get the binary road mask
    mask_image = cv2.inRange(hsv_image, color_lower, color_upper)
    # HSV ranges for the red and green lights
    color_red_lower = numpy.array([100, 170, 120])
    color_red_upper = numpy.array([130, 240, 240])
    color_green_lower = numpy.array([65, 220, 150])
    color_green_upper = numpy.array([80, 230, 220])
    # choose a band where the traffic light is expected to appear
    # (note: search_top/search_bot are computed here but not applied to the light masks below)
    h, w = mask_image.shape
    search_top = h - 140
    search_bot = h - 80
    # if we are not currently stopped at a red light, look for a red light
    if not red_light:
        mask_red_image = cv2.inRange(hsv_image, color_red_lower, color_red_upper)
        # tune this yourself
        mask_red_image = cv2.erode(mask_red_image, kernel, iterations=1)
        if numpy.amax(mask_red_image) > 0:
            print("red light detected")
            red_light = True
    else:  # a red light was seen; wait for a green light
        mask_green_image = cv2.inRange(hsv_image, color_green_lower, color_green_upper)
        # tune this yourself
        mask_green_image = cv2.erode(mask_green_image, kernel, iterations=1)
        if numpy.amax(mask_green_image) > 0:
            print("green light detected")
            red_light = False
    if red_light:  # stop while the red light is active
        twist = Twist()
        twist.linear.x = 0
        twist.linear.y = 0
        twist.linear.z = 0
        twist.angular.x = 0
        twist.angular.y = 0
        pub_cmd.publish(twist)
    else:
        # masked = cv2.bitwise_and(raw_image, raw_image, mask=mask_image)
        # image processing function (line following)
        image_processing()


# Process the raw image to extract the region we need
def image_processing():
    global mask_image, up_diff, last_diff, diff
    res = raw_image
    h, w, d = res.shape
    search_top = h - 20
    search_bot = h
    img_up = mask_image.copy()
    mask_image[0:search_top, 0:w] = 0
    mask_image[search_bot:h, 0:w] = 0
    # scan the bottom band of the mask for the first line pixel,
    # expanding outward column by column from near the image center (w//2 - 15)
    P = [None, None]
    for i in range(w // 2 - 17):
        if P[0] is not None:
            break
        for j in range(20):
            if P[0] is not None:
                break
            if not mask_image[search_top + j, w // 2 - 15 + i] == 0:
                P = [search_top + j, w // 2 - 15 + i]
            elif not mask_image[search_top + j, w // 2 - 15 - i] == 0:
                P = [search_top + j, w // 2 - 15 - i]
    # if a line pixel was found, compute the offset error
    if P[0] is not None:  # a line pixel was found in the bottom band
        ix = int(P[1])
        iy = int(P[0])
        cv2.circle(res, (ix, iy), 10, (255, 0, 255), -1)
        diff = ix - w / 2
        d_diff = diff - last_diff
        twist_calculate(diff, d_diff)
    else:  # no line pixel found in the bottom band
        search_top = h - 60  # look further up the image for the line (gap handling)
        search_bot = h - 20
        mask_image = img_up[:]
        mask_image[0:search_top, 0:w] = 0
        mask_image[search_bot:h, 0:w] = 0
        for i in range(w // 2 - 17):
            if P[0] is not None:
                break
            for j in range(40):
                if P[0] is not None:
                    break
                if not mask_image[search_top + j, w // 2 - 15 + i] == 0:
                    P = [search_top + j, w // 2 - 15 + i]
                elif not mask_image[search_top + j, w // 2 - 15 - i] == 0:
                    P = [search_top + j, w // 2 - 15 - i]
        # if a line pixel was found, compute the offset error
        if P[0] is not None:  # a line pixel was found in the upper band
            ix = int(P[1])
            iy = int(P[0])
            cv2.circle(res, (ix, iy), 10, (255, 0, 255), -1)
            diff = ix - w / 2
            d_diff = diff - last_diff
            twist_calculate(diff, d_diff)
        else:  # still no line; rotate toward the side where the line was last seen
            twist = Twist()
            twist.linear.x = 0
            twist.linear.y = 0
            twist.linear.z = 0
            twist.angular.x = 0
            twist.angular.y = 0
            if up_diff > 0:
                twist.angular.z = -0.2
            else:
                twist.angular.z = +0.2
            pub_cmd.publish(twist)

    # publish the annotated image and the binary mask on their topics
    img_msg = bridge.cv2_to_imgmsg(res, encoding="bgr8")
    img_msg.header.stamp = rospy.Time.now()
    result_pub.publish(img_msg)
    img_msg = bridge.cv2_to_imgmsg(mask_image, encoding="passthrough")
    img_msg.header.stamp = rospy.Time.now()
    mask_pub.publish(img_msg)


# Speed-handling function
def twist_calculate(twist_erro, twist_d_erro):
    global last_diff, up_diff, diff
    twist = Twist()
    twist.linear.x = 0.1
    twist.linear.y = 0
    twist.linear.z = 0
    twist.angular.x = 0
    twist.angular.y = 0
    if abs(twist_erro) < 20:  # small error: dead zone, keep driving straight
        twist.angular.z = 0
    else:  # otherwise steer back toward the line (P term only; the D gain is 0)
        twist.angular.z = -float(twist_erro) * 0.0025 - float(twist_d_erro) * 0.000
        up_diff = twist_d_erro  # remember the error trend; used to pick the recovery turn direction
        if abs(twist_erro) > 100:  # error is very large: stop and turn in place
            twist.linear.x = 0
    last_diff = diff
    pub_cmd.publish(twist)


if __name__ == '__main__':
    try:
        # init ROS node 
        rospy.init_node("line_follow")
        rospy.loginfo("Starting Line Follow node")
        line_follow_init()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down cv_bridge_test node.")
        cv2.destroyAllWindows()
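
Because the red/green ranges above are marked "tune this yourself", it helps to check candidate HSV ranges offline before copying the file to the car. The script below is a hypothetical helper (hsv_range_check.py is not part of the original code): it applies one range to a saved photo of the phone/tablet screen, using the same resize and the same COLOR_RGB2HSV conversion as the node, and writes the resulting mask to disk for inspection.

#!/usr/bin/env python3
# hsv_range_check.py - hypothetical offline helper for tuning HSV thresholds
import sys
import cv2
import numpy

def check_range(image_path, lower, upper):
    # Count how many pixels of the photo fall inside [lower, upper] in HSV
    # and save the binary mask so the range can be checked visually.
    bgr = cv2.imread(image_path)
    if bgr is None:
        raise SystemExit("could not read " + image_path)
    bgr = cv2.resize(bgr, (320, 240), interpolation=cv2.INTER_AREA)
    hsv = cv2.cvtColor(bgr, cv2.COLOR_RGB2HSV)  # same conversion as the node
    mask = cv2.inRange(hsv, numpy.array(lower), numpy.array(upper))
    print("pixels in range:", cv2.countNonZero(mask))
    cv2.imwrite("mask_preview.png", mask)

if __name__ == '__main__':
    # example: the red-light range used in the node above
    check_range(sys.argv[1], [100, 170, 120], [130, 240, 240])

Run it as "python3 hsv_range_check.py photo.jpg" and open mask_preview.png; a good range lights up the phone screen and little else.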

It is recommended to copy the code to the car with a USB drive (and to paste the text into a file on the car rather than copying the file itself). If it is transferred via remote transfer, remote desktop, a virtual machine, or the like, the Chinese comments may turn into garbled characters; it is not yet clear whether that affects execution.

If the script will not run on the car, right-click the file, open Properties, go to the second tab (Permissions), and tick the option that allows executing the file as a program (equivalently, run chmod +x on the file in a terminal).


Reposted from blog.csdn.net/weixin_58196051/article/details/131159250