HALCON pick and place based on SCARA hand-eye calibration

  This example shows how to use a SCARA robot to perform a pick-and-place application based on the calibration information determined by a SCARA hand-eye calibration. It consists of two main parts:
  first, a model image is used to define the shape model of the target object;
  second, the shape model is used to find the objects in each image, the robot coordinates of a selected object are calculated, and the object is grasped.

(To adapt the example to a real application, the images must be acquired from a camera instead of being read from file; see the acquisition sketch below.)
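In a real setup, the read_image calls in the listing below would be replaced by image acquisition from a frame grabber. A minimal sketch, assuming a GigE Vision camera accessed through HALCON's 'GigEVision2' interface (the device name and parameters are placeholders):

* Open a connection to the camera (interface and device name are placeholders)
open_framegrabber ('GigEVision2', 0, 0, 0, 0, 0, 0, 'progressive', -1, 'default', -1, 'false', 'default', 'default', 0, -1, AcqHandle)
grab_image_start (AcqHandle, -1)
* Acquire one image per cycle (instead of read_image)
grab_image_async (Image, AcqHandle, -1)
* ...
* Release the camera when the application ends
close_framegrabber (AcqHandle)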

* To run the example with the provided example images, RectifyImages must be set to true.
*
RectifyImages := true
* 
* Read the calibration information provided by one of the HDevelop example programs
* calibrate_hand_eye_scara_stationary_cam.hdev or align_hand_eye_scara_stationary_cam.hdev.

try
    * Read out the results of the hand-eye calibration
    * Read the 3D pose of the camera in the robot base coordinate system from file
    read_pose ('cam_in_base_pose.dat', CamInBasePose)
    * Read the data needed for estimating the pose of the objects to be grasped
    * Read the interior camera parameters from file
    read_cam_par ('camera_parameters.dat', CameraParam)
    * Read the 3D pose of the measurement plane in the camera coordinate system (MPInCamPose) from file
    read_pose ('measurement_plane_in_cam_pose.dat', MPInCamPose)
catch (Exception)
    * If the calibration information cannot be read from file, use the calibration data given below instead
    * To create the calibration files, run one of the HDevelop example programs
    * calibrate_hand_eye_scara_stationary_cam.hdev or align_hand_eye_scara_stationary_cam.hdev
    CamInBasePose := [0.05592166548,0.19497621789,0.48025117245,180.09816119,29.85593363,179.94389014,0]
    CameraParam := [0.0165251,-642.277,4.65521e-006,4.65e-006,595.817,521.75,1280,1024]
    MPInCamPose := [0.0045679683065,-0.0028695297318,0.4088853425,359.78658429,29.732027579,0.22946472765,0]
endtry
* 
* Prepare the rectification map used to remove the image distortion
if (RectifyImages)
    * Prepare the map that rectifies images to the measurement plane (prepare_rectification_map is
    * a local procedure of the original HALCON example; see the sketch after this block)
    prepare_rectification_map (Map, CameraParam, MPInCamPose, MappingScale, MPInCamPoseMapping)
    * Transform the image point (0,0) to the world plane z=0 to obtain the upper-left corner of the
    * rectified map in world coordinates (meters)
    image_points_to_world_plane (CameraParam, MPInCamPoseMapping, 0, 0, 'm', MapUpperLeftX, MapUpperLeftY)
endif
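* Note: prepare_rectification_map is a local procedure of the original HALCON example and is not
* listed in this post. Conceptually it selects a mapping scale and a mapping pose and generates the
* rectification map, roughly like the following sketch (the scale, origin offsets, and image sizes
* are assumptions, not the procedure's actual values):
*     MappingScale := 0.0001
*     set_origin_pose (MPInCamPose, -0.064, -0.0512, 0, MPInCamPoseMapping)
*     gen_image_to_world_plane_map (Map, CameraParam, MPInCamPoseMapping, 1280, 1024, 1280, 1024, MappingScale, 'bilinear')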
* 
dev_update_off ()
* Set the HALCON system parameter 'border_shape_models' so that model instances extending beyond the image border can also be found
set_system ('border_shape_models', 'true')
* 
* Here, a connection to the robot should be established, and the robot should be moved to a
* defined standby pose that allows an unoccluded image of the measurement plane to be acquired
* (see the sketch below)
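* The robot interface is not part of the HALCON example. A minimal sketch, assuming the robot
* controller accepts text commands over a plain TCP socket (host, port, and command strings are
* placeholders for the controller's actual protocol):
*     open_socket_connect ('192.168.0.10', 3000, 'protocol', 'TCP4', RobotSocket)
*     send_data (RobotSocket, 'z', 'MOVE_TO_STANDBY_POSE', [])
*     receive_data (RobotSocket, 'z', Acknowledge, From)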
* 
* Define the shape model of the objects to be grasped

* - Acquire the image used for model generation
read_image (Image, '3d_machine_vision/handeye/scara_stationary_cam_setup_01_metal_parts_04')
* Rectify the image
if (RectifyImages)
    * Apply the rectification map to the image
    map_image (Image, Map, ModelImage)
else 
    * If no rectification is required, copy the image (allocating new memory for it)
    copy_image (Image, ModelImage)
endif
* 
dev_close_window ()
dev_open_window_fit_image (ModelImage, 0, 0, 600, 600, WindowHandle)
set_display_font (WindowHandle, 16, 'mono', 'true', 'false')
dev_clear_window ()
dev_display (ModelImage)
dev_set_line_width (2)
*
* - Create the shape model
*
* Draw an axis-parallel rectangle interactively (a rectangle of arbitrary orientation could be drawn with draw_rectangle2 instead)
draw_rectangle1 (WindowHandle, Row1, Column1, Row2, Column2)
gen_rectangle1 (ModelROI, Row1, Column1, Row2, Column2)
* Smooth the image with a discrete Gaussian filter
gauss_filter (ModelImage, ImageGauss, 5)
* Reduce the domain of the image to the ROI
reduce_domain (ImageGauss, ModelROI, ImageReduced)
* Prepare a shape model for matching
* 'use_polarity': objects in the image must have the same contrast polarity as the model; e.g.,
* if the model is a bright object on a dark background, the object is only found if it is
* brighter than the background
create_shape_model (ImageReduced, 'auto', rad(0), rad(360), 'auto', 'auto', 'use_polarity', [10,50], 'auto', ModelID)
* Determine the area and center of the model ROI
area_center (ModelROI, ModelROIArea, ModelROIRow, ModelROIColumn)
* Display the shape matching result
dev_display_shape_matching_results (ModelID, 'green', ModelROIRow, ModelROIColumn, 0, 1, 1, 0)
*
* - Determine the grasping point on the object, either by marking it in the image (only possible
*   if the tool can grasp the object in any orientation) or by grasping the object with the
*   robot and recording the corresponding robot pose
*   

DefineGraspingPointByRobot := true
* Define the grasping point via the robot
if (DefineGraspingPointByRobot)
    dev_set_colored (12)
    * Pose of the grasping point in the robot base coordinate system (these values were determined beforehand for the object shown in the model image)
    GraspingPointModelInBasePose := [0.26745,0.1645,0.1229,0,0,20.5672,0]
    * Invert the pose of the camera in the base coordinate system (-> pose of the base in camera coordinates)
    pose_invert (CamInBasePose, BaseInCamPose)
    * Convert the inverted 3D pose into a homogeneous transformation matrix
    pose_to_hom_mat3d (BaseInCamPose, BaseInCamHomMat3D)
    * Transform the grasping point from base coordinates into camera coordinates
    affine_trans_point_3d (BaseInCamHomMat3D, GraspingPointModelInBasePose[0], GraspingPointModelInBasePose[1], GraspingPointModelInBasePose[2], Qx, Qy, Qz)
    * Project the 3D point into (pixel) image coordinates
    project_3d_point (Qx, Qy, Qz, CameraParam, GraspingPointModelRow, GraspingPointModelColumn)
    * Orientation angle of the grasping point (Gamma component of the pose, in degrees)
    GraspingPointModelAngle := GraspingPointModelInBasePose[5]
    if (RectifyImages)
        * Calculate the coordinates of the grasping point in the rectified image
        * Transform the image point to the world plane z=0
        image_points_to_world_plane (CameraParam, MPInCamPoseMapping, GraspingPointModelRow, GraspingPointModelColumn, MappingScale, GraspingPointModelXMP, GraspingPointModelYMP)
        GraspingPointModelRow := GraspingPointModelYMP - MapUpperLeftY / MappingScale
        GraspingPointModelColumn := GraspingPointModelXMP - MapUpperLeftX / MappingScale
        * Display the grasping pose in the rectified model image
        get_image_size (ModelImage, WidthM, HeightM)
        CamParamRect := [0,0,MappingScale,MappingScale,-MapUpperLeftX / MappingScale,-MapUpperLeftY / MappingScale,WidthM,HeightM]
        GraspingPointModelXMP := MapUpperLeftX + GraspingPointModelColumn * MappingScale
        GraspingPointModelYMP := MapUpperLeftY + GraspingPointModelRow * MappingScale
        PoseCoordSystemVis := [GraspingPointModelXMP,GraspingPointModelYMP,0,0,0,GraspingPointModelAngle,0]
        dev_set_colored (12)
        disp_3d_coord_system (WindowHandle, CamParamRect, PoseCoordSystemVis, 0.02)
    else
        * If the images are not rectified, display the grasping pose in the original model image
        pose_invert (CamInBasePose, BaseInCamPose)
        * Compose the two 3D poses
        pose_compose (BaseInCamPose, GraspingPointModelInBasePose, PoseCoordSystemVis)
        dev_set_colored (12)
        disp_3d_coord_system (WindowHandle, CameraParam, PoseCoordSystemVis, 0.02)
    endif
    disp_message (WindowHandle, 'Model contours and grasping pose', 'window', 12, 12, 'black', 'true')
* Define the grasping point by marking it in the image
else
    * Segment the image with an automatically selected binary threshold
    binary_threshold (ImageReduced, Region, 'max_separability', 'light', UsedThreshold)
    * Fill holes in the region
    fill_up (Region, RegionFillUp)
    * Erode the region with a rectangular structuring element
    erosion_rectangle1 (RegionFillUp, RegionErosion, 160, 1)
    * Determine the smallest enclosing rectangle (arbitrary orientation) of the region
    smallest_rectangle2 (RegionErosion, GraspingPointModelRow, GraspingPointModelColumn, Phi, Length1, Length2)
    * Generate a cross-shaped XLD contour at the computed grasping point
    gen_cross_contour_xld (GraspingPointModel, GraspingPointModelRow, GraspingPointModelColumn, 25, 0.785398)
    dev_set_color ('yellow')
    dev_display (GraspingPointModel)
    disp_message (WindowHandle, 'Model contours and grasping point', 'window', 12, 12, 'black', 'true')
endif
area_center (ModelROI, ModelROIArea, ModelROIRow, ModelROIColumn)
* Set the origin (reference point) of the shape model to the grasping point
set_shape_model_origin (ModelID, GraspingPointModelRow - ModelROIRow, GraspingPointModelColumn - ModelROIColumn)
disp_continue_message (WindowHandle, 'black', 'true')
stop ()
* 
* Loop over the images and grasp the objects with the robot
*
* Convert the 3D pose MPInCamPoseMapping into a homogeneous transformation matrix
pose_to_hom_mat3d (MPInCamPoseMapping, MPInCamHomMat3DMapping)
for ImageIdx := 2 to 6 by 1
    * Acquire the next image
    read_image (Image, '3d_machine_vision/handeye/scara_stationary_cam_setup_01_metal_parts_' + ImageIdx$'02d')
    * 
    * Rectify the image so that instances of the object can be searched for with standard shape-based matching
    *
    if (RectifyImages)
        map_image (Image, Map, SearchImage)
    else
        copy_image (Image, SearchImage)
    endif
    dev_clear_window ()
    dev_display (SearchImage)
    * 
    * Find instances of the object
    * Find the best matches of the shape model in the image (to find all instances whose score
    * exceeds MinScore, NumMatches must be set to 0)
    find_shape_model (SearchImage, ModelID, rad(0), rad(360), 0.5, 0, 0.5, 'least_squares', [0,3], 0.9, Row, Column, Angle, Score)
    if (|Row| < 1)
        disp_message (WindowHandle, 'No objects found', 'window', 12, 12, 'black', 'true')
        continue
    endif
    * 
    * Select one particular instance (here: the leftmost one)
    LeftmostIdx := sort_index(Column)[0]
    GraspingPointRow := Row[LeftmostIdx]
    GraspingPointColumn := Column[LeftmostIdx]
    GraspingPointAngle := Angle[LeftmostIdx]
    * 
    * Display the matching results and indicate the object to be grasped
    dev_display_shape_matching_results (ModelID, 'blue', Row, Column, Angle, 1, 1, 0)
    dev_display_shape_matching_results (ModelID, 'green', GraspingPointRow, GraspingPointColumn, GraspingPointAngle, 1, 1, 0)
    disp_message (WindowHandle, |Row| + ' objects found (Green: Object to be grasped)', 'window', 12, 12, 'black', 'true')
    disp_continue_message (WindowHandle, 'black', 'true')
    stop ()
    * 
    * Calculate the point to be approached by the SCARA robot with a stationary camera
    * (ObjInBasePose: pose of the object to be grasped in the robot base coordinate system)
    calculate_point_to_approach_scara_stationary (GraspingPointRow, GraspingPointColumn, GraspingPointAngle + rad(GraspingPointModelAngle), RectifyImages, MapUpperLeftX, MapUpperLeftY, MappingScale, MPInCamHomMat3DMapping, CameraParam, MPInCamPose, CamInBasePose, ObjInBasePose)
    * 
    * Display the object to be grasped together with its grasping point
    dev_clear_window ()
    dev_display (SearchImage)
    dev_display_shape_matching_results (ModelID, 'green', GraspingPointRow, GraspingPointColumn, GraspingPointAngle, 1, 1, 0)
    * 
    dev_set_colored (12)
    if (RectifyImages)
        * Get the size of the search image
        get_image_size (SearchImage, Width, Height)
        CamParamRect := [0,0,MappingScale,MappingScale,-MapUpperLeftX / MappingScale,-MapUpperLeftY / MappingScale,Width,Height]
        GraspingPointXMP := MapUpperLeftX + GraspingPointColumn * MappingScale
        GraspingPointYMP := MapUpperLeftY + GraspingPointRow * MappingScale
        PoseCoordSystemVis := [GraspingPointXMP,GraspingPointYMP,0,0,0,-deg(GraspingPointAngle) + GraspingPointModelAngle,0]
        * Display the axes of the coordinate system at the grasping pose
        disp_3d_coord_system (WindowHandle, CamParamRect, PoseCoordSystemVis, 0.02)
    else
        pose_invert (CamInBasePose, BaseInCamPose)
        pose_compose (BaseInCamPose, ObjInBasePose, PoseCoordSystemVis)
        disp_3d_coord_system (WindowHandle, CameraParam, PoseCoordSystemVis, 0.02)
    endif
    * 
    disp_message (WindowHandle, 'Press F5 to grasp and place the indicated object', 'window', 12, 12, 'black', 'true')
    disp_message (WindowHandle, ['ObjInBasePose:','Tx:    ','Ty:    ','Tz:    ','Alpha: ','Beta:  ','Gamma: '] + ['',ObjInBasePose[0:5]$'.3f' + [' m',' m',' m',' deg',' deg',' deg']], 'window', 305, 12, 'black', 'true')
    disp_continue_message (WindowHandle, 'black', 'true')
    stop ()
    * If necessary, convert the translational part of the pose to mm
    ToolInBasePoseMM := [ObjInBasePose[0:2] * 1000,ObjInBasePose[3:6]]
    * 
    * Grasp and place the object
    * 
    * Here, the robot should be moved to the object pose determined above (ToolInBasePoseMM),
    * grasp the object there, and place it at some predefined position (e.g., PlacePositionInBasePoseMM).
    * Finally, the robot should be moved back to the standby pose so that an unoccluded image of the
    * measurement plane can be acquired (see the sketch below).
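    * A minimal sketch of this step, assuming the text protocol from the connection sketch above
    * (the command syntax and the place pose PlacePositionInBasePoseMM are placeholders):
    *     send_data (RobotSocket, 'z', 'PICK ' + sum(ToolInBasePoseMM$'.3f' + ' '), [])
    *     send_data (RobotSocket, 'z', 'PLACE ' + sum(PlacePositionInBasePoseMM$'.3f' + ' '), [])
    *     send_data (RobotSocket, 'z', 'MOVE_TO_STANDBY_POSE', [])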
    *
endfor
* 
* Here, the connection to the robot should be closed (see the sketch below).
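* A one-line sketch, assuming the socket connection opened above:
*     close_socket (RobotSocket)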
* 
* Free the memory of the shape model
clear_shape_model (ModelID)
set_system ('border_shape_models', 'false')
dev_clear_window ()

Source: blog.csdn.net/Kevin_Sun777/article/details/108753352