For stereo calibration using a Charuco board, do the object and image points have to be equal in the left and right camera images?

Hi,

I am trying to calibrate two mono cameras connected to my Oak FFC 3P. After gathering the images, I wrote code to calibrate using a Charuco board, making sure the board covered the full field of view across the images. But when I run my code, I get the following error:

shivam157@ubuntu:~/depthai$ python3 Custom_calibrate.py 
Calibration results for left:
Reprojection Error: 0.3296308861833062
Camera Matrix:
[[939.30631865   0.         639.5       ]
 [  0.         939.30631865 399.5       ]
 [  0.           0.           1.        ]]
Distortion Coefficients:
[[ 0.05503208 -0.08982793  0.          0.          0.02675822]]

Calibration results for right:
Reprojection Error: 0.3007155053282476
Camera Matrix:
[[925.38888455   0.         639.5       ]
 [  0.         925.38888455 399.5       ]
 [  0.           0.           1.        ]]
Distortion Coefficients:
[[ 0.05063603 -0.05049415  0.          0.         -0.03526356]]

Calibration complete. Proceeding to stereo calibration...
Image  0
Number of object points for left camera:  72
Number of image points for left camera:  70
Number of object points for right camera:  72
Number of image points for right camera:  68
Image  1
Number of object points for left camera:  72
Number of image points for left camera:  64
Number of object points for right camera:  72
Number of image points for right camera:  30
Image  2
Number of object points for left camera:  72
Number of image points for left camera:  72
Number of object points for right camera:  72
Number of image points for right camera:  50
Image  3
Number of object points for left camera:  72
Number of image points for left camera:  72
Number of object points for right camera:  72
Number of image points for right camera:  68
Image  4
Number of object points for left camera:  72
Number of image points for left camera:  72
Number of object points for right camera:  72
Number of image points for right camera:  72
... (the same output pattern continues for all 39 image pairs) ...

Traceback (most recent call last):
  File "/home/shivam157/depthai/Custom_calibrate.py", line 91, in <module>
    ret, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
cv2.error: OpenCV(4.5.5) /io/opencv/modules/calib3d/src/calibration.cpp:3358: error: (-2:Unspecified error) in function 'void cv::collectCalibrationData(cv::InputArrayOfArrays, cv::InputArrayOfArrays, cv::InputArrayOfArrays, int, cv::Mat&, cv::Mat&, cv::Mat*, cv::Mat&)'
> Number of object and image points must be equal (expected: 'numberOfObjectPoints == numberOfImagePoints'), where
>     'numberOfObjectPoints' is 72
> must be equal to
>     'numberOfImagePoints' is 70

My code is the following:

import numpy as np
import cv2
import cv2.aruco as aruco
import glob

# Define the Charuco board parameters
charuco_square_length = 0.038  # Square side length (in meters)
charuco_marker_length = 0.028  # Marker side length (in meters)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
board = aruco.CharucoBoard_create(
    13, 7, charuco_square_length, charuco_marker_length, aruco_dict)

# Calibration flags and initial camera matrix setup
calibration_flags = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_FIX_PRINCIPAL_POINT
cameraMatrixInit = np.array(
    [[1000., 0., 320.], [0., 1000., 240.], [0., 0., 1.]])

# Paths and storage for calibration data
# Data storage for calibration
allCorners_left = []
allIds_left = []
allCorners_right = []
allIds_right = []
objectPoints_left = []  # Separate object points for left
objectPoints_right = []  # Separate object points for right

# Loop through images as before, collect allCorners and allIds
for cam_id in ['left', 'right']:
    image_directory = f'./dataset/{cam_id}/'
    images = glob.glob(image_directory + '*.png')
    for img_path in images:
        img = cv2.imread(img_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict)
        if len(corners) > 0:
            ret, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                corners, ids, gray, board)
            if ret and charuco_corners is not None and charuco_ids is not None:
                if cam_id == 'left':
                    allCorners_left.append(charuco_corners)
                    allIds_left.append(charuco_ids)
                    objectPoints_left.append(board.chessboardCorners)
                else:
                    allCorners_right.append(charuco_corners)
                    allIds_right.append(charuco_ids)
                    objectPoints_right.append(board.chessboardCorners)

    # Calibrate the camera using collected points
    if cam_id == 'left':
        ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
            charucoCorners=allCorners_left,
            charucoIds=allIds_left,
            board=board,
            imageSize=gray.shape[::-1],
            cameraMatrix=cameraMatrixInit.copy(),
            distCoeffs=None,
            flags=calibration_flags
        )
    else:
        ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
            charucoCorners=allCorners_right,
            charucoIds=allIds_right,
            board=board,
            imageSize=gray.shape[::-1],
            cameraMatrix=cameraMatrixInit.copy(),
            distCoeffs=None,
            flags=calibration_flags
        )

    print(f"Calibration results for {cam_id}:")
    print(f"Reprojection Error: {ret}")
    print(f"Camera Matrix:\n{camera_matrix}")
    print(f"Distortion Coefficients:\n{dist_coeffs}\n")


print("Calibration complete. Proceeding to stereo calibration...")
# Print the number of object points and image points for each camera in each of the 39 image pairs
for i in range(39):
    print("Image ", i)
    print("Number of object points for left camera: ",
          len(objectPoints_left[i]))
    print("Number of image points for left camera: ", len(allCorners_left[i]))
    print("Number of object points for right camera: ",
          len(objectPoints_right[i]))
    print("Number of image points for right camera: ",
          len(allCorners_right[i]))


# Perform stereo calibration using the correct objectPoints
ret, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    # Assume the same number of points for left and right
    objectPoints=objectPoints_right,
    imagePoints1=allCorners_left,
    imagePoints2=allCorners_right,
    cameraMatrix1=cameraMatrixInit,
    distCoeffs1=None,
    cameraMatrix2=cameraMatrixInit,
    distCoeffs2=None,
    imageSize=gray.shape[::-1],
    criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-5),
    flags=cv2.CALIB_FIX_INTRINSIC
)

print("Stereo Calibration results:")
print("Rotation Matrix:\n", R)
print("Translation Vector:\n", T)
print("Essential Matrix:\n", E)
print("Fundamental Matrix:\n", F)

# Compute rectification transforms
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                                                  gray.shape[::-1], R, T)

print("Rectification Matrices and Projection Matrices:")
print("R1:\n", R1)
print("R2:\n", R2)
print("P1:\n", P1)
print("P2:\n", P2)
print("Disparity-to-depth Mapping Matrix (Q):\n", Q)

The following is the link to the dataset I have created:
Google Drive link

What am I doing wrong? Please help.

Okay, so I have an answer to my question but not a solution. The OpenCV documentation for stereoCalibrate says:

objectPoints Vector of vectors of the calibration pattern points. The same structure as in calibrateCamera. For each pattern view, both cameras need to see the same object points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to be equal for each i.

where imagePoints1 and imagePoints2 are the left and right cameras, respectively.

So, is it impossible to calibrate if the image points are not equal to the object points? I have taken good pictures of the Charuco board, but not all of the board corners are detected in every view. How do I calibrate now?
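In other words, the three per-view lists have to line up exactly before cv2.stereoCalibrate is called. A minimal sanity check of that contract (just a sketch, assuming objectPoints, imagePoints1 and imagePoints2 are the hypothetically named per-view lists I intend to pass in):

# Check the cv2.stereoCalibrate contract quoted above: the outer lists must
# have the same length, and the per-view point counts must match for every i.
assert len(objectPoints) == len(imagePoints1) == len(imagePoints2)
for i, (obj, img_l, img_r) in enumerate(zip(objectPoints, imagePoints1, imagePoints2)):
    assert len(obj) == len(img_l) == len(img_r), \
        f"view {i}: {len(obj)} object points vs {len(img_l)}/{len(img_r)} image points"

My original script fails this immediately, because it appends the full set of 72 board corners as object points for every view while only the detected subset ends up in the image points.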

I think the following code makes sure that object points = image points within each pair. Across pairs there is no such restriction, so the following code should work:

import numpy as np
import cv2
import cv2.aruco as aruco
import glob

# Define the Charuco board parameters
charuco_square_length = 0.038  # Square side length (in meters)
charuco_marker_length = 0.028  # Marker side length (in meters)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
board = aruco.CharucoBoard_create(
    13, 7, charuco_square_length, charuco_marker_length, aruco_dict)

# Calibration flags and initial camera matrix setup
calibration_flags = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_FIX_PRINCIPAL_POINT
cameraMatrixInit = np.array(
    [[1000., 0., 320.], [0., 1000., 240.], [0., 0., 1.]])

# Storage for calibration data
allCorners = {'left': [], 'right': []}
allIds = {'left': [], 'right': []}
image_directory = './dataset/'

# Collect all corners and IDs first
for cam_id in ['left', 'right']:
    images = sorted(glob.glob(f'{image_directory}{cam_id}/*.png'))  # sort so left/right lists stay paired by index
    for img_path in images:
        img = cv2.imread(img_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict)
        if ids is not None:
            ret, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
                corners, ids, gray, board)
            if ret > 0:
                allCorners[cam_id].append(charuco_corners)
                allIds[cam_id].append(charuco_ids)

# Match points and perform calibration
matched_object_points = []
matched_corners_left = []
matched_corners_right = []
for i in range(min(len(allCorners['left']), len(allCorners['right']))):
    # Ensure matching ids in both left and right images
    common_ids = np.intersect1d(allIds['left'][i], allIds['right'][i])
    if len(common_ids) > 0:
        indices_left = np.isin(allIds['left'][i], common_ids).flatten()
        indices_right = np.isin(allIds['right'][i], common_ids).flatten()

        matched_object_points.append(board.chessboardCorners[common_ids, :])
        matched_corners_left.append(allCorners['left'][i][indices_left])
        matched_corners_right.append(allCorners['right'][i][indices_right])

# Now use matched corners to perform stereo calibration
if matched_corners_left and matched_corners_right:
    ret, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        objectPoints=matched_object_points,
        imagePoints1=matched_corners_left,
        imagePoints2=matched_corners_right,
        cameraMatrix1=None,
        distCoeffs1=None,
        cameraMatrix2=None,
        distCoeffs2=None,
        imageSize=gray.shape[::-1],
        criteria=(cv2.TERM_CRITERIA_EPS +
                  cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-5),
        flags=0
    )
    print("Stereo Calibration Reprojection Error:\n", ret)
    print("camera matrix left:\n", cameraMatrix1)
    print("camera matrix right:\n", cameraMatrix2)
    print("Distortion coeffecient left:\n", distCoeffs1)
    print("Distortion coeffecient right:\n", distCoeffs2)
    print("\nStereo Calibration results:")
    print("Rotation Matrix:\n", R)
    print("Translation Vector:\n", T)
    print("Essential Matrix:\n", E)
    print("Fundamental Matrix:\n", F)