Hello,
I want to calibrate stereo cameras pair and create a triangulated view of detected points, but the triangulation returns quite distorted object.
I use 2 cameras, 100 calibration image pairs.
Both cameras are the same type.
Resolution is 1280x720
Here’s an example of images I use:
Right image
The code is as follows:
SQ_WIDTH = 12
SQ_HEIGHT = 9
SQUARE_LENGTH = 0.040
MARKER_LENGTH = 0.030
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
board = cv2.aruco.CharucoBoard((SQ_WIDTH, SQ_HEIGHT), SQUARE_LENGTH, MARKER_LENGTH, dictionary)
# Right camera
[repr_errorR, mtxR, distR, rvecsR, tvecsR] = cv2.aruco.calibrateCameraCharuco(imgpoints_corners_R, corner_idsR, board, grayR.shape[::-1], None, None)
# Left camera
[repr_errorL, mtxL, distL, rvecsL, tvecsL] = cv2.aruco.calibrateCameraCharuco(imgpoints_corners_L, corner_idsL, board, grayL.shape[::-1], None, None)
[INFO] Reprojection error for Right camera: 0.18419312441875527
[INFO] Reprojection error for Left camera: 0.18229800250761574
retS, mtxLS, distLS, mtxRS, distRS, R, T, E, F = cv2.stereoCalibrate(stereo_objpoints,
stereo_cornersL,stereo_cornersR,
mtxL, distL,
mtxR,distR,
grayL.shape[::-1],
criteria,
stereocalib_flags)
[INFO] Stereo calibration reprojection error: 0.4741365312866202
Resulting calibration data are:
mtxR = np.array([[626.20510139, 0. , 633.79508752],
[ 0. , 625.95696507, 366.69429711],
[ 0. , 0. , 1. ]])
mtxLS = np.array([[619.61110357, 0. , 640.58960374],
[ 0. , 619.41011075, 367.13741063],
[ 0. , 0. , 1. ]])
distRS = np.array([[ 1.09526185e-01, -1.04717454e-01, 6.01798088e-05, -1.26580066e-03, 5.37722921e-02]])
distLS = np.array([[ 0.10360878, -0.09521651, -0.00036414, -0.00022415, 0.04692143]])
R = np.array([[ 0.97487946, 0.0731213 , 0.21038847],
[-0.08119302, 0.99624754, 0.02997549],
[-0.20740715, -0.04630456, 0.97715821]])
T = np.array([[-0.21914937],
[ 0.06513346],
[ 0.0963156 ]])
I do several checks to determine calibration quality, such as drawing epipolar lines and rectification:
# drawing epipolar lines
S = cv2.stereoRectify(mtxL, distL, mtxR, distR, size, RL[:,:3], RL[:,-1])
R1, R2, P1, P2, Q, roi1, roi2 = S
for img_id, (pts1, pts2) in enumerate(zip(stereo_cornersL, stereo_cornersR)):
img1 = cv2.imread(left_images[img_id])
img2 = cv2.imread(right_images[img_id])
lines1 = cv2.computeCorrespondEpilines(pts2, 2, F)
lines2 = cv2.computeCorrespondEpilines(pts1, 1, F)
img1_show, img2 = drawlines(img1, img2, lines1.squeeze(),
pts1, pts2)
#Making copy because drawlines function modifies the image
img1_show = np.copy(img1_show)
img3_show, img4 = drawlines(img2, img1, lines2.squeeze(),
pts2, pts1)
#rectification
map1x, map1y = cv2.initUndistortRectifyMap(mtxL, distL, R1, P1, size, cv2.CV_16SC2)
map2x, map2y = cv2.initUndistortRectifyMap(mtxR, distR, R2, P2, size, cv2.CV_16SC2)
img1_remap = cv2.remap(imgL, map1x, map1y, cv2.INTER_LINEAR)
img2_remap = cv2.remap(imgR, map2x, map2y, cv2.INTER_LINEAR)
img1_remap = cv2.rectangle(img1_remap, roi1[:2], roi1[2:], (0,255,0), 3)
img2_remap = cv2.rectangle(img2_remap, roi2[:2], roi1[2:], (0,255,0), 3)
img_rect_stack = np.hstack((img1_remap, img2_remap))
But on triangulaton I get really distorted target.
Triangulated target and cameras.
I reproject triangulated points and the result is distorted as well:
Left reprojected charuco points/grid
Right reprojected charuco points/grid
My triangulation code is as follows:
#projection matrices
PR = mtxR @ np.eye(4)[:3]
RL = np.hstack(R, T)
PL = mtxL @ RL
def projectPoints(X, K, R, t, Kd):
#Source: https://github.com/CMU-Perceptual-Computing-Lab/panoptic-toolbox/blob/master/python/demo_3Dkeypoints_reprojection_hd.py
x = np.asarray(R@X + t)
x[0:2,:] = x[0:2,:]/x[2,:]
r = x[0,:]*x[0,:] + x[1,:]*x[1,:]
x[0,:] = x[0,:]*(1 + Kd[0]*r + Kd[1]*r*r + Kd[4]*r*r*r) + 2*Kd[2]*x[0,:]*x[1,:] + Kd[3]*(r + 2*x[0,:]*x[0,:])
x[1,:] = x[1,:]*(1 + Kd[0]*r + Kd[1]*r*r + Kd[4]*r*r*r) + 2*Kd[3]*x[0,:]*x[1,:] + Kd[2]*(r + 2*x[1,:]*x[1,:])
x[0,:] = K[0,0]*x[0,:] + K[0,1]*x[1,:] + K[0,2]
x[1,:] = K[1,0]*x[0,:] + K[1,1]*x[1,:] + K[1,2]
return x
def draw_charuco_grid_2d(img, board, pointIds, points, color="green"):
if color == "green":
color = (0,255,0)
elif color == "red":
color = (0,0,255)
else:
raise ValueError("Invalid color. Only green and red are supported.")
board_idxs = np.arange(0, len(board.getChessboardCorners()))
board_idxs = board_idxs.reshape(board.getChessboardSize()[1]-1,
board.getChessboardSize()[0]-1)
for row in board_idxs:
for el in range(1, len(row)):
point1idx = row[el-1]
point2idx = row[el]
point1idx = np.where(pointIds==point1idx)
point2idx = np.where(pointIds==point2idx)
if len(point1idx[0]) == 0 or len(point2idx[0]) == 0:
continue
pt1 = [int(x) for x in points[point1idx][0]]
pt2 = [int(x) for x in points[point2idx][0]]
img = cv2.line(img, pt1, pt2, color, 2)
for column in board_idxs.T:
for el in range(1, len(column)):
point1idx = column[el-1]
point2idx = column[el]
point1idx = np.where(pointIds==point1idx)
point2idx = np.where(pointIds==point2idx)
if len(point1idx[0]) == 0 or len(point2idx[0]) == 0:
continue
pt1 = [int(x) for x in points[point1idx][0]]
pt2 = [int(x) for x in points[point2idx][0]]
img = cv2.line(img, pt1, pt2, color, 2)
return img
IDX = 0
pointsL = calibration["frame_cornersL"][IDX]
pointsR = calibration["frame_cornersR"][IDX]
idsL = calibration["frame_idsL"][IDX]
idsR = calibration["frame_idsR"][IDX]
imgL = cv2.imread(left_images[IDX])
imgR = cv2.imread(right_images[IDX])
p3d_cv= []
for uv1, uv2 in zip(pointsL.squeeze(), pointsR.squeeze()):
_p3d_cv = cv2.triangulatePoints(PL, PR, uv1, uv2)
p3d_cv.append(_p3d_cv)
p3d_cv = p3d_cv.squeeze()[:,:3] / p3d_cv.squeeze()[:,3].reshape(len(p3d_cv), 1)
projected_points_L_cmu = []
projected_points_R_cmu = []
for p in p3d_cv:
p = p.reshape(3,1)
K = mtxL
R = RL[:3,:3]
t = RL[:,3].reshape(3,1)
Kd = distL.reshape(5,)
ret = projectPoints(p, K, R, t, Kd)
projected_points_L_cmu.append(ret)
for p in p3d_cv:
p = p.reshape(3,1)
K = mtxR
R = RR[:3,:3]
t = RR[:,3].reshape(3,1)
Kd = distR.reshape(5,)
ret = projectPoints(p, K, R, t, Kd)
projected_points_R_cmu.append(ret)
projected_points_L_cmu = np.stack(projected_points_L_cmu)
projected_points_R_cmu = np.stack(projected_points_R_cmu)
projected_points_L_cmu = projected_points_L_cmu.reshape(-1, 1, 3)[:,:,:2]
projected_points_R_cmu = projected_points_R_cmu.reshape(-1, 1, 3)[:,:,:2]
frameL = draw_charuco_grid_2d(imgL, board, idsL, pointsL, color="green")
frameR = draw_charuco_grid_2d(imgR, board, idsR, pointsR, color="green")
frameL = draw_charuco_grid_2d(frameL, board, idsL, projected_points_L_cmu, color="red")
frameR = draw_charuco_grid_2d(frameR, board, idsR, projected_points_R_cmu, color="red")
I don’t know what might be the result of such distortion, as seemingly all intermediate steps before triangulation return reasonable results.
Any help would be appreciated.