Huateng Weishi black and white camera calibration

1. Camera parameters

Resolution: 2448*2048@40FPS
Lens: 16mm
Object distance: about 300mm
Field of view: 158mm in x direction, 132mm in y direction (calculated according to the focal length of your own lens)

2. Calibration version

Due to the small field of view, a small calibration board is needed. Use the online calibration-board generator provided by Yuxiang ROS: choose the Checkerboard pattern and set the Checker Width parameter to 8 mm, so that the entire calibration board is fully and clearly visible at the 300 mm working distance.

3. Calibration

3.1 Configure Huateng Vision Camera

Encapsulate Huateng Weishi camera configuration into an App

#coding=utf-8
import cv2
import numpy as np
import mvsdk
import platform

def config_cam():
	"""Enumerate, open and configure the first (or user-selected) MindVision camera.

	Returns:
		(hCamera, pFrameBuffer): the SDK camera handle and an aligned frame
		buffer sized for the sensor's maximum resolution.

	Raises:
		RuntimeError: if no camera is found or initialisation fails. (The
		original code returned None here, which made the caller crash later
		with an opaque tuple-unpacking TypeError.)
	"""
	# Enumerate attached cameras.
	DevList = mvsdk.CameraEnumerateDevice()
	nDev = len(DevList)
	if nDev < 1:
		print("No camera was found!")
		raise RuntimeError("No camera was found!")

	for i, DevInfo in enumerate(DevList):
		print("{}: {} {}".format(i, DevInfo.GetFriendlyName(), DevInfo.GetPortType()))
	i = 0 if nDev == 1 else int(input("Select camera: "))
	DevInfo = DevList[i]
	print(DevInfo)

	# Open the camera (-1, -1: SDK default parameter group / mode).
	try:
		hCamera = mvsdk.CameraInit(DevInfo, -1, -1)
	except mvsdk.CameraException as e:
		print("CameraInit Failed({}): {}".format(e.error_code, e.message))
		raise RuntimeError("CameraInit failed") from e

	# Query the camera capability description.
	cap = mvsdk.CameraGetCapability(hCamera)

	# Distinguish monochrome from colour sensors.
	monoCamera = (cap.sIspCapacity.bMonoSensor != 0)

	# For mono cameras let the ISP output MONO8 directly instead of
	# expanding to 24-bit R=G=B grey.
	if monoCamera:
		mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
	else:
		mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)

	# Switch to continuous acquisition (0 = free-running, no trigger).
	mvsdk.CameraSetTriggerMode(hCamera, 0)

	# Manual exposure, 30 ms (the SDK takes microseconds).
	mvsdk.CameraSetAeState(hCamera, 0)
	mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)

	# Start the SDK's internal frame-grabbing thread.
	mvsdk.CameraPlay(hCamera)

	# Size the output buffer for the maximum sensor resolution:
	# 1 byte/pixel for mono, 3 for colour.
	FrameBufferSize = cap.sResolutionRange.iWidthMax * cap.sResolutionRange.iHeightMax * (1 if monoCamera else 3)

	# Allocate an aligned buffer for the ISP output. The camera delivers RAW
	# data which the software ISP converts on the PC side; a mono camera skips
	# the colour conversion but the ISP still needs this buffer for its other
	# processing steps.
	pFrameBuffer = mvsdk.CameraAlignMalloc(FrameBufferSize, 16)

	return hCamera, pFrameBuffer

def convert_to_opencv(hCamera, pFrameBuffer, out_size=(2448, 2048)):
	"""Grab one frame from the camera and return it as an OpenCV image.

	Args:
		hCamera: SDK camera handle returned by config_cam().
		pFrameBuffer: aligned ISP output buffer returned by config_cam().
		out_size: (width, height) the frame is resized to before returning.
			Defaults to 2448x2048 so existing callers keep the original
			behaviour; pass another size to generalise.

	Returns:
		numpy uint8 image: single channel for mono cameras, 3-channel for
		colour cameras.

	Raises:
		mvsdk.CameraException: e.g. CAMERA_STATUS_TIME_OUT when no frame
		arrives within the 200 ms wait.
	"""
	pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)
	mvsdk.CameraImageProcess(hCamera, pRawData, pFrameBuffer, FrameHead)
	mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)

	# On Windows the SDK delivers the image bottom-up (BMP layout), so flip
	# it vertically; on Linux it already arrives top-down.
	if platform.system() == "Windows":
		mvsdk.CameraFlipFrameBuffer(pFrameBuffer, FrameHead, 1)

	# pFrameBuffer now holds the processed frame: RGB data for a colour
	# camera, 8-bit grey for a mono camera. Wrap it as a numpy array so
	# OpenCV can work on it without copying byte-by-byte.
	frame_data = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(pFrameBuffer)
	frame = np.frombuffer(frame_data, dtype=np.uint8)
	channels = 1 if FrameHead.uiMediaType == mvsdk.CAMERA_MEDIA_TYPE_MONO8 else 3
	frame = frame.reshape((FrameHead.iHeight, FrameHead.iWidth, channels))
	frame = cv2.resize(frame, out_size, interpolation=cv2.INTER_LINEAR)
	return frame

def turn_off_camera(hCamera,pFrameBuffer):
	"""Release the resources acquired by config_cam().

	Args:
		hCamera: SDK camera handle.
		pFrameBuffer: aligned frame buffer from CameraAlignMalloc.
	"""
	# Close the camera first so the SDK's grab thread stops touching the buffer.
	mvsdk.CameraUnInit(hCamera)
	# Then free the aligned frame buffer.
	mvsdk.CameraAlignFree(pFrameBuffer)


def main_loop():
	"""Continuously grab frames until 'q' is pressed, then release the camera."""
	cam = config_cam()
	if cam is None:  # config_cam prints its own diagnostic on failure
		return
	hCamera, pFrameBuffer = cam
	while (cv2.waitKey(1) & 0xFF) != ord('q'):
		try:
			# Grab one frame from the camera.
			frame = convert_to_opencv(hCamera, pFrameBuffer)

			# Rotate 90 degrees clockwise (used by the commented preview below).
			rotated_img = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

			# frame = cv2.resize(frame, (640,480), interpolation = cv2.INTER_LINEAR)

			# cv2.imshow("Press q to end", frame)
			# cv2.imshow("w", rotated_img)

		except mvsdk.CameraException as e:
			# Timeouts are expected when no frame is ready; report anything else.
			if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
				print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))

	turn_off_camera(hCamera, pFrameBuffer)

# def main():
# 	try:
# 		main_loop()
# 	finally:
# 		cv2.destroyAllWindows()
#
# main()

3.2 Capture pictures

First, convert the Huateng Weishi camera frames to OpenCV format, then collect pictures. In this code, each press of the space bar captures one picture, which is automatically stored in a folder named "cilabrate".

#coding=utf-8
import cv2
import numpy as np
import mvsdk
import platform
from cv_grab_App import *
from Ellipse_dectecton import *

def main_loop():
	"""Show live frames; SPACE saves a numbered JPEG to ./cilabrate, 'q' quits."""
	import os
	hCamera, pFrameBuffer = config_cam()
	# Make sure the output directory exists, otherwise cv2.imwrite fails silently.
	os.makedirs("./cilabrate", exist_ok=True)
	i = 0
	while True:
		# Read the keyboard exactly once per iteration. The original polled
		# cv2.waitKey twice (once for 'q', once for SPACE), so a SPACE press
		# could be consumed by the quit check and the frame never saved.
		key = cv2.waitKey(1) & 0xFF
		if key == ord('q'):
			break
		try:
			# Grab one frame from the camera and display it.
			frame = convert_to_opencv(hCamera, pFrameBuffer)
			cv2.imshow("Press q to end", frame)
			if key == ord(' '):
				cv2.imwrite("./cilabrate/{}.jpg".format(i), frame)
				i = i + 1
		except mvsdk.CameraException as e:
			# Timeouts just mean no frame was ready yet; report anything else.
			if e.error_code != mvsdk.CAMERA_STATUS_TIME_OUT:
				print("CameraGetImageBuffer failed({}): {}".format(e.error_code, e.message))

	turn_off_camera(hCamera, pFrameBuffer)

def main():
	"""Run the capture loop, always closing OpenCV windows on exit."""
	try:
		main_loop()
	finally:
		# Ensure preview windows are destroyed even if the loop raised.
		cv2.destroyAllWindows()

main()

3.3 Calibration

Modify the corresponding paths. Also, because the camera is a black-and-white camera but the captured images are still 3-channel data, extract a single channel with gray = img[:, :, 1] to convert the data to single-channel.

import cv2
import numpy as np
import glob

# Corner refinement termination: stop after 30 iterations or when the
# correction drops below 0.001 px.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Checkerboard pattern size: number of *inner* corners (corners shared
# between four squares) per row and per column.
w = 10
h = 7

# Board corner coordinates in the world frame: (0,0,0), (1,0,0), ...,
# (w-1, h-1, 0). Z stays zero because the board is planar.
objp = np.zeros((w * h, 3), np.float32)
grid = np.mgrid[0:w, 0:h]
objp[:, :2] = grid.T.reshape(-1, 2)

# Matched point sets, one entry per successfully detected image:
objpoints = []  # 3-D points in the world frame
imgpoints = []  # 2-D points in the image plane

images = glob.glob('./cilabrate/*.jpg')
for fname in images:
    img = cv2.imread(fname)
    # The mono camera still produces 3-channel JPEGs, so take one channel
    # to get the single-channel grey image the detector expects.
    gray = img[:,:,1]
    print("gray.shape:",gray.shape)
    # Locate the inner chessboard corners.
    # Input: 8-bit grey (or colour) image, pattern size, optional output array.
    ret, corners = cv2.findChessboardCorners(gray, (w,h), None)
    # Store the point pair only when the full pattern was found.
    if ret == True:
        # Refine corners to sub-pixel accuracy: search window 2*11+1,
        # no dead zone, iterate until `criteria` is met. Keep the refined
        # corners returned by the call explicitly instead of relying on
        # in-place modification of the input array.
        corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw the detected corners on the image for visual inspection.
        cv2.drawChessboardCorners(img, (w,h), corners, ret)
        cv2.imshow('findCorners',img)
        cv2.waitKey(1000)
cv2.destroyAllWindows()
# Calibrate, then undistort a sample image.
# Inputs: world-frame positions, pixel coordinates, image size in pixels,
#         3x3 intrinsic matrix, distortion matrix (None, None = estimate both).
# Outputs: calibration result (RMS reprojection error), intrinsic matrix,
#          distortion coefficients, rotation vectors, translation vectors.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# mtx:   intrinsic matrix
# dist:  distortion coefficients
# rvecs: rotation vectors (extrinsics, one per image)
# tvecs: translation vectors (extrinsics, one per image)
print (("ret:"),ret)
print (("mtx:\n"),mtx)        # intrinsic matrix
print (("dist:\n"),dist)      # distortion coefficients = (k_1,k_2,p_1,p_2,k_3)
print (("rvecs:\n"),rvecs)    # rotation vectors (extrinsics)
print (("tvecs:\n"),tvecs)    # translation vectors (extrinsics)
# Undistortion
img2 = cv2.imread('./cilabrate/1.jpg')
h,w = img2.shape[:2]
# With the intrinsics and distortion coefficients known, we can optionally
# refine them with cv.getOptimalNewCameraMatrix() before undistorting, via
# the free scaling factor alpha:
# alpha=0 returns intrinsics with the unwanted (black) pixels cropped away;
# alpha=1 keeps every source pixel (adding black border pixels) and returns
# an ROI that can be used to crop them off.
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h)) # free scaling parameter alpha = 0

dst = cv2.undistort(img2, mtx, dist, None, newcameramtx)
# Crop the result with the ROI computed above.
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.jpg',dst)

# Re-projection error: with the estimated intrinsics, distortion
# coefficients, rotations and translations, project the 3-D board points
# back into each image with cv2.projectPoints() and compare against the
# detected corners. The closer the mean error is to 0, the better the
# calibration.
total_error = 0
for board_pts, detected, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
    projected, _ = cv2.projectPoints(board_pts, rvec, tvec, mtx, dist)
    total_error += cv2.norm(detected, projected, cv2.NORM_L2) / len(projected)
print (("total error: "), total_error/len(objpoints))

Reference link:
[Python Computer Vision] Camera Calibration (Camera Calibration)
generates checkerboard online

Guess you like

Origin blog.csdn.net/weixin_41837701/article/details/131845523