A summary of the 3D reconstruction code

What I put together before was rather messy and inconvenient to use, so this post records how to use the code in detail.

The complete code is at the end of the post.

1. To use the code, two paths have to be changed: the image path and the calibration plate (description file) path. A short sketch of the lines that typically need editing follows this list.

2. Change the file names of the object images you want to reconstruct.

3. The thresholds used in the final 3D reconstruction usually do not need to be changed; adjust them only if an error is reported. That part is more involved, and the suitable values vary a lot from scene to scene, so the reconstruction can run into problems with unsuitable settings. How to tune them is recorded in other posts of this column; I will cover it in detail in a later post.

4. Remember to change the number of calibration images.

5. Remember to change the camera parameters, here the focal length and the pixel size.

6. The calibration plate description file is read directly here; there is also code to generate it. If you have not generated it beforehand, uncomment that line.

7. Image path: I keep all images in one folder and distinguish left and right by file name.

8. Calibration plate description file path: it can be stored anywhere.
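
As a quick reference, here is a minimal sketch of the lines that typically need editing. The two paths are made-up placeholders for illustration; the numeric values are the ones used in the full code at the end:

* Paths (notes 1, 7, 8): placeholders, substitute your own
ImgPath := 'C:/my_project/stereo_images/'
CalTabFile := 'C:/my_project/caltab_7mm.descr'
* If the .descr file does not exist yet, uncomment the create_caltab call in the full code to generate it (note 6)
* Camera parameters (note 5): focal length 0.016 m, pixel size 3.75e-6 m
read_image (ImageL, ImgPath + 'l01')
get_image_size (ImageL, WidthL, HeightL)
gen_cam_par_area_scan_division (0.016, 0, 3.75e-6, 3.75e-6, WidthL / 2.0, HeightL / 2.0, WidthL, HeightL, StartCamParL)
* Number of calibration image pairs (note 4): change the 12 in 'for I := 1 to 12 by 1'
* Object images to reconstruct (note 2): change the names 'la_4' / 'ra_4' in the two read_image calls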

Let's look at the results:

Group 1: left and right camera images (adjusted)

Disparity map of the matched overlapping region and the 3D reconstruction

Group 2: left and right camera images (adjusted)

Disparity map of the matched overlapping region and the 3D reconstruction

The complete code:


ImgPath := 'H:/multicam_rebuild/pic9_12/'
* Read the first images to get their size
Index := 1
read_image (ImageL, ImgPath + 'l' + Index$'02d')
read_image (ImageR, ImgPath + 'r' + Index$'02d')
* Reopen the windows with an appropriate size
dev_close_window ()
dev_update_off ()
get_image_size (ImageL, WidthL, HeightL)
dev_open_window (0, 0, WidthL, HeightL, 'black', WindowHandle1)
dev_set_draw ('margin')
dev_set_color ('green')
set_display_font (WindowHandle1, 14, 'mono', 'true', 'false')
get_image_size (ImageR, WidthR, HeightR)
dev_open_window (0, WidthL+10, WidthL, HeightL, 'black', WindowHandle2)
dev_set_draw ('margin')
dev_set_color ('green')

* Set the initial values for the internal camera parameters
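* (Note 5: 0.016 is the focal length in meters and 3.75e-6 the pixel size in meters; adjust these to your camera)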
gen_cam_par_area_scan_division (0.016, 0, 3.75e-6, 3.75e-6, WidthL / 2.0, HeightL / 2.0, WidthL, HeightL, StartCamParL)
StartCamParR := StartCamParL

* Read the model calibration points.
*create_caltab(26, 30, 0.003, [12, 4, 4, 20, 20], [14, 4, 24, 4, 24], 'light_on_dark', 'D:/教学/课程/机器人视觉技术/材料/caltab200.descr', 'D:/caltab200.ps')
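* (Note 6: if the description file has not been generated yet, uncomment the create_caltab line above)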
CalTabFile := 'H:/multicam_rebuild/7mm.descr'

create_calib_data ('calibration_object', 2, 1, CalibDataID)
* Set the camera type used
set_calib_data_cam_param (CalibDataID, 0, [], StartCamParL)
set_calib_data_cam_param (CalibDataID, 1, [], StartCamParR)
* Set the calibration object
set_calib_data_calib_object (CalibDataID, 0, CalTabFile)
* Set the optimization method to be used
set_calib_data (CalibDataID, 'model', 'general', 'optimization_method', 'nonlinear')


* Start the loop over the calibration images
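* (Note 4: 12 is the number of calibration image pairs; change it to match your data set)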
for I := 1 to 12 by 1
    * Read and display the calibration images
    read_image (ImageL, ImgPath + 'l' + I$'02d')
    read_image (ImageR, ImgPath + 'r' + I$'02d')
    dev_set_window (WindowHandle1)
    dev_display (ImageL)
    dev_set_window (WindowHandle2)
    dev_display (ImageR)
    * Find the calibration plate and store observations
    * in the calibration data model
    find_calib_object (ImageL, CalibDataID, 0, 0, I, [], [])
    find_calib_object (ImageR, CalibDataID, 1, 0, I, [], [])
    * Visualize the extracted calibration marks and the
    * coordinate system defined by the estimated pose.
    
    visualize_observation_results (ImageL, CalibDataID, 0, I, WindowHandle1)    
    visualize_observation_results (ImageR, CalibDataID, 1, I, WindowHandle2)
    wait_seconds (.2)
endfor
* Perform the actual calibration
calibrate_cameras (CalibDataID, Errors)
* Get the calibrated camera parameters
get_calib_data (CalibDataID, 'camera', 0, 'params', CamParamL)
get_calib_data (CalibDataID, 'camera', 1, 'params', CamParamR)
* Since the left camera is the reference camera for the
* calib data model, the pose of the right camera is its
* pose relative to the left camera
get_calib_data (CalibDataID, 'camera', 1, 'pose', cLPcR)
* Store the results into files. Here, you can either store the
* individual results
write_cam_par (CamParamL, 'cam_left-125.dat')
write_cam_par (CamParamR, 'cam_right-125.dat')
write_pose (cLPcR, 'pos_right2left.dat')
* or you store the complete camera setup model and thus enable
* the later access to all contained parameters
get_calib_data (CalibDataID, 'model', 'general', 'camera_setup_model', CameraSetupModelID)
write_camera_setup_model (CameraSetupModelID, 'stereo_camera_setup.csm')
* Generate the rectification maps
gen_binocular_rectification_map (MapL, MapR, CamParamL, CamParamR, cLPcR, 1, 'geometric', 'bilinear', RectCamParL, RectCamParR, CamPoseRectL, CamPoseRectR, RectLPosRectR)
* Read in a stereo image pair of the object, acquired with the
* stereo camera system that has just been calibrated.
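* (Note 2: 'la_4' / 'ra_4' are the object images to reconstruct; change these names for your own scene)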
read_image (ImageL, ImgPath + 'la_4')
read_image (ImageR, ImgPath + 'ra_4')
* Convert the color object images to gray before rectification and matching
rgb1_to_gray (ImageL, ImageL)
rgb1_to_gray (ImageR, ImageR)

* Rectify the stereo images and display them
map_image (ImageL, MapL, ImageRectifiedL)
map_image (ImageR, MapR, ImageRectifiedR)
* On the rectified images the epipolar constraint holds:
* corresponding features lie in (nearly) the same image row
dev_close_window ()
dev_close_window ()

*get_image_size (ImageRectifiedL, WidthL, HeightL)
* The line above is left commented out: the images end up placed at an angle,
* so using their new size would enlarge the window; the original size is kept instead
dev_open_window (0, 0, WidthL, HeightL, 'white', WindowHandle1)
set_display_font (WindowHandle1, 11, 'mono', 'true', 'false')
dev_display (ImageRectifiedL)
disp_message (WindowHandle1, 'Left rectified image', 'window', 10, 10, 'black', 'true')
* 
* Display the right rectified image
dev_open_window (0, WidthL + 10, WidthL, HeightL, 'white', WindowHandle2)
set_display_font (WindowHandle2, 11, 'mono', 'true', 'false')
dev_display (ImageRectifiedR)
disp_message (WindowHandle2, 'Right rectified image', 'window', 10, 10, 'black', 'true')
disp_continue_message (WindowHandle2, 'black', 'true')
stop ()


binocular_disparity (ImageRectifiedL, ImageRectifiedR, DisparityImage, Score, 'ncc', 17, 17, 5, 10, 40, 1, 0.1, 'left_right_check', 'none')
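* (Parameters above: NCC matching, 17x17 mask, texture threshold 5, disparity range [10, 40],
* 1 pyramid level, score threshold 0.1, left-right consistency check; as mentioned in note 3,
* these may need tuning for other scenes)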
* 
* Fill the gaps in the disparity image
get_domain (DisparityImage, RegionInpainting)
complement (RegionInpainting, RegionInpainting)
full_domain (DisparityImage, DisparityImage)
harmonic_interpolation (DisparityImage, RegionInpainting, DisparityImage, 0.001)
* 
* Display the disparity image
dev_set_window (WindowHandle1)
dev_display (DisparityImage)
disp_message (WindowHandle1, 'Disparity image', 'window', 10, 10, 'black', 'true')
* 
* Compute the 3D coordinates
* *******************************************
* 
* Transform the disparity image into images X, Y and Z.
* The gray values in X, Y and Z represent the x, y, and z
* coordinates of the pixels (Row, Column).
disparity_image_to_xyz (DisparityImage, X, Y, Z, RectCamParL, RectCamParR, RectLPosRectR)
* 
* Visualize the 3D points in the 3D space
* *******************************************
* 
* Scale Z (for better visualization)
scale_image (Z, Z, 2, 0)
* 
* Reduce impact of inaccuracies in visualization
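* (Note 3: the Z range below depends on the scene and on the scaling of Z above;
* it is the value most likely to need adjustment if the reconstruction looks wrong)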
threshold (Z, Regions, 0.3, 0.7)
reduce_domain (Z, Regions, ZThresholded)
* 
* Add the gray values to the point cloud.
xyz_attrib_to_object_model_3d (X, Y, ZThresholded, ImageRectifiedL,'&gray', ObjectModel3D)
* 
* Visualize the result
prepare_object_model_3d (ObjectModel3D, 'segmentation', 'true', 'max_area_holes', 100)
create_pose (-0.05, -0.45, 1.7, 300, 12, 180, 'Rp+T', 'gba', 'point', Pose)
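* (The pose above only defines the initial view; visualize_object_model_3d allows interactive inspection of the model)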
visualize_object_model_3d (WindowHandle2, ObjectModel3D, [], Pose, 'color_attrib', '&gray', [], [], [], PoseOut)

clear_object_model_3d (ObjectModel3D)


Reposted from blog.csdn.net/weixin_51229250/article/details/124729906