Fisheye stereo ChArUco calibration

Hello,

I am attempting to calibrate a wide-angle (125º) stereo camera with the method in the title, but I am not able to make the code work. My initial attempt was to use a normal chessboard to obtain the calibration images with the fisheye model; although this works, I am unable to achieve a low enough error (around 0.1). After doing some research I adapted my code to do the following:

  1. Remove images where the pattern is very close to the image edge (the reasoning behind this is explained in my previous post https://forum.opencv.org/t/problems-with-fisheye-stereo-calibration/20201)
  2. Remove images with potentially flipped points or corners
  3. Calibrate each camera monocularly to provide initial guesses for the stereo calibration

After attempting to take more and better images without success, I decided to implement a ChArUco calibration, since I suspected that step (1) was preventing the model from getting enough information around the borders. But I get the following error when calibrating:

retL, K_L, D_L, rvecsL, tvecsL = cv2.fisheye.calibrate(
                                     ^^^^^^^^^^^^^^^^^^^^^^
cv2.error: OpenCV(4.11.0) D:\bld\libopencv_1739279452246\work\modules\calib3d\src\fisheye.cpp:710: error: (-215:Assertion failed) !objectPoints.empty() && !imagePoints.empty() && objectPoints.total() == imagePoints.total() in function 'cv::fisheye::calibrate'

Below is my complete code (including both the ChArUco and the plain chessboard paths).

Thanks in advance.

import cv2
import numpy as np
import os, sys, glob

REPROJ_ERROR_THRESHOLD = 2 # px
BLACK_PIXEL_FRACTION_THRESHOLD = 0.8
GRAY_PIXEL_FRACTION_THRESHOLD = 0.8

# https://github.com/jamiemilsom/Fisheye_ChArUco_Calibration  
# https://github.com/pettod/charuco-corner-detection

def image_selector(calibration_images, frame_size, chessboard_size, square_size, debug_dirs):
    imagesL, imagesR = calibration_images
    debug_corners_dir, debug_rect_dir, debug_rejected = debug_dirs

    objpoints = []
    imgpoints_L = []
    imgpoints_R = []

    valid_imgs_L = []
    valid_imgs_R = []

    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 1, 3), np.float32)
    objp[:, 0, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2) * square_size
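    # Note: cv2.fisheye.calibrate expects object points as (N, 1, 3) float arrays per view,
    # hence the extra singleton axis above.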
    loadedX, loadedY = cv2.imread(imagesL[0], cv2.IMREAD_GRAYSCALE).shape[::-1]

    for i, (imgL_path, imgR_path) in enumerate(zip(imagesL, imagesR)):
        imgL = cv2.imread(imgL_path)
        imgR = cv2.imread(imgR_path)

        grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
        grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

        retL, cornersL = cv2.findChessboardCorners(grayL, chessboard_size, None)
        retR, cornersR = cv2.findChessboardCorners(grayR, chessboard_size, None)

        if not retL or not retR:
            print(f"No corners found for {imgL_path}. Skipping.")
            continue  # Skip this pair and continue with the next one

        imgL_debug = imgL.copy()
        imgR_debug = imgR.copy()

        cv2.drawChessboardCorners(imgL_debug, chessboard_size, cornersL, retL)
        cv2.drawChessboardCorners(imgR_debug, chessboard_size, cornersR, retR)
        debug_corner_img = np.hstack((imgL_debug, imgR_debug))

        cornersL = np.float32(cornersL)
        cornersR = np.float32(cornersR)

        minRx, maxRx = np.min(cornersR[:, :, 0]), np.max(cornersR[:, :, 0])
        minRy, maxRy = np.min(cornersR[:, :, 1]), np.max(cornersR[:, :, 1])
        minLx, maxLx = np.min(cornersL[:, :, 0]), np.max(cornersL[:, :, 0])
        minLy, maxLy = np.min(cornersL[:, :, 1]), np.max(cornersL[:, :, 1])

        border_threshold_x = loadedX / 10
        border_threshold_y = loadedY / 10

        # if minRx < border_threshold_x or minLx < border_threshold_x or \
        #    minRy < border_threshold_y or minLy < border_threshold_y:
        #     print(f"Chessboard too close to the border in {imgL_path}. Skipping.")
        #     cv2.imwrite(f"{debug_rejected}/cutted_rejected_{i}.png", debug_corner_img)
        #     continue  # Skip this pair

        cv2.imwrite(f"{debug_corners_dir}/detected_{i}.png", debug_corner_img)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        winsize = (11, 11)
        cornersL = cv2.cornerSubPix(grayL, cornersL, winsize, (-1, -1), criteria)
        cornersR = cv2.cornerSubPix(grayR, cornersR, winsize, (-1, -1), criteria)

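        # Step (2): compare left/right corner layouts; a very large total distance usually
        # means the corners were detected in a flipped/reversed order in one of the views.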
        diff = cornersL - cornersR
        lengths = np.linalg.norm(diff, axis=2)
        total_diff = np.sum(lengths)

        if total_diff > 5000.0:
            print(f"Broken stereo pair detected for {imgL_path}. Diff: {total_diff}. Skipping.")
            cv2.imwrite(f"{debug_rejected}/diff_rejected_{i}.png", debug_corner_img)
            continue  # Skip this pair

        # Save valid data
        objpoints.append(objp)
        imgpoints_L.append(cornersL)
        imgpoints_R.append(cornersR)
        valid_imgs_L.append(imgL_path)
        valid_imgs_R.append(imgR_path)
        
    return valid_imgs_L, valid_imgs_R, objpoints, imgpoints_L, imgpoints_R

def charuco_image_selector(calibration_images, aruco_board, aruco_dict, aruco_params, debug_dir):
    imagesL, imagesR = calibration_images

    valid_imgs_L = []
    valid_imgs_R = []

    objpoints = []
    imgpoints_L = []
    imgpoints_R = []

    for i, (imgL_path, imgR_path) in enumerate(zip(imagesL, imagesR)):
        imgL = cv2.imread(imgL_path)
        imgR = cv2.imread(imgR_path)

        grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
        grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

        corners_L, ids_L, rc_L = cv2.aruco.detectMarkers(grayL, aruco_dict, parameters=aruco_params)
        corners_R, ids_R, rc_R = cv2.aruco.detectMarkers(grayR, aruco_dict, parameters=aruco_params)

        if ids_L is None or ids_R is None:
            print(f"No markers found for {imgL_path}. Skipping.")
            continue  # Skip this pair and continue with the next one

        # Refine the markers
        corners_L_ref, ids_L_ref, rc_L_ref, rec_idx_L = cv2.aruco.refineDetectedMarkers(
            grayL, aruco_board, corners_L, ids_L, rc_L, parameters=aruco_params
        )
        corners_R_ref, ids_R_ref, rc_R_ref, rec_idx_R = cv2.aruco.refineDetectedMarkers(
            grayR, aruco_board, corners_R, ids_R, rc_R, parameters=aruco_params
        )

        # Use markers to find corners
        if ids_L_ref is None or ids_R_ref is None or \
            len(ids_L_ref) == 0 or len(ids_R_ref) == 0:
            print(f"No markers found for {imgL_path}. Skipping.")
            continue

        charuco_retL, charuco_corners_L, charuco_ids_L = cv2.aruco.interpolateCornersCharuco(
            corners_L_ref, ids_L_ref, grayL, aruco_board
        )
        charuco_retR, charuco_corners_R, charuco_ids_R = cv2.aruco.interpolateCornersCharuco(
            corners_R_ref, ids_R_ref, grayR, aruco_board
        )

        if charuco_retL <= 4 or charuco_retR <= 4:
            print(f"4 or fewer ChArUco corners found for {imgL_path}. Skipping.")
            continue

        # Save valid data
        valid_imgs_L.append(imgL_path)
        valid_imgs_R.append(imgR_path)

        # objpoints.append(aruco_board.getChessboardCorners()[charuco_ids_R])
        # imgpoints_L.append(charuco_corners_L)
        # imgpoints_R.append(charuco_corners_R)

        # With ChArUco the two views do not necessarily detect the same corners,
        # so we keep only the IDs seen in both images.
        common_ids = np.intersect1d(charuco_ids_L.flatten(), charuco_ids_R.flatten())
        common_corners_L = []
        common_corners_R = []
        common_objpoints = []

        board_obj_corners = aruco_board.getChessboardCorners()  # shape (N, 3)
        # For each common ID, extract its image coordinates (from left and right)
        # and the corresponding 3D object point from the board.
        for cid in common_ids:
            idxL = np.where(charuco_ids_L.flatten() == cid)[0][0]
            idxR = np.where(charuco_ids_R.flatten() == cid)[0][0]
            common_corners_L.append(charuco_corners_L[idxL][0])  # charuco_corners shape: (n,1,2)
            common_corners_R.append(charuco_corners_R[idxR][0])
            # Use the board's ordering. The board's getChessboardCorners() returns the
            # 3D coordinates in the board coordinate system. We use the detected ID as index.
            common_objpoints.append(board_obj_corners[cid])

        common_corners_L = np.array(common_corners_L, dtype=np.float32)
        common_corners_R = np.array(common_corners_R, dtype=np.float32)
        common_objpoints = np.array(common_objpoints, dtype=np.float32)
        # Expand dimensions if your calibration function expects each view's points in shape (n, 1, ?)

        # For example, fisheye calibration in OpenCV expects object points as (n, 1, 3)
        common_corners_L = np.expand_dims(common_corners_L, axis=1)  # shape: (n, 1, 2)
        common_corners_R = np.expand_dims(common_corners_R, axis=1)  # shape: (n, 1, 2)
        common_objpoints   = np.expand_dims(common_objpoints, axis=1)    # shape: (n, 1, 3)

        objpoints.append(common_objpoints)
        imgpoints_L.append(common_corners_L)
        imgpoints_R.append(common_corners_R)

        # Draw markers and charuco corners
        imgL_debug = imgL.copy()
        imgR_debug = imgR.copy()

        cv2.aruco.drawDetectedMarkers(imgL_debug, corners_L_ref, ids_L_ref)
        cv2.aruco.drawDetectedMarkers(imgR_debug, corners_R_ref, ids_R_ref)

        cv2.aruco.drawDetectedCornersCharuco(imgL_debug, charuco_corners_L, charuco_ids_L)
        cv2.aruco.drawDetectedCornersCharuco(imgR_debug, charuco_corners_R, charuco_ids_R)
        img_debug = np.hstack((imgL_debug, imgR_debug))

        cv2.imwrite(f"{debug_dir}/markers_{i}.png", img_debug)

    return valid_imgs_L, valid_imgs_R, objpoints, imgpoints_L, imgpoints_R

def mono_calibration(calibration_images, frame_size, debug_dir, 
                            objpoints, imgpoints_L, imgpoints_R, output_dir):
    
    # Create output and debug directories
    output_dir = f"{output_dir}/independent"
    os.makedirs(output_dir, exist_ok=True)

    debug_dir = f"{debug_dir}/independent"
    os.makedirs(debug_dir, exist_ok=True)

    # Camera calibration: Independent Fisheye for Left and Right cameras
    K_L = np.zeros((3, 3), dtype=np.float64)
    D_L = np.zeros((4, 1), dtype=np.float64)
    K_R = np.zeros((3, 3), dtype=np.float64)
    D_R = np.zeros((4, 1), dtype=np.float64)

    # Calibration flags
    flags = (
        cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + 
        cv2.fisheye.CALIB_CHECK_COND + 
        cv2.fisheye.CALIB_FIX_SKEW
    )
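    # CALIB_CHECK_COND makes cv2.fisheye.calibrate raise an error that names the index of an
    # ill-conditioned view, which helps identify bad calibration frames.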

    # Independent calibration for the left camera
    retL, K_L, D_L, rvecsL, tvecsL = cv2.fisheye.calibrate(
        objpoints, imgpoints_L, frame_size, K_L, D_L, flags=flags
    )

    # Independent calibration for the right camera
    retR, K_R, D_R, rvecsR, tvecsR = cv2.fisheye.calibrate(
        objpoints, imgpoints_R, frame_size, K_R, D_R, flags=flags
    )

    print(f"Left camera calibration error: {retL}")
    print(f"Right camera calibration error: {retR}")

    # Save parameters for both cameras
    np.save(os.path.join(output_dir, "K_L.npy"), K_L, allow_pickle=False)
    np.save(os.path.join(output_dir, "D_L.npy"), D_L, allow_pickle=False)
    np.save(os.path.join(output_dir, "K_R.npy"), K_R, allow_pickle=False)
    np.save(os.path.join(output_dir, "D_R.npy"), D_R, allow_pickle=False)

    # Rectify images and save debug results
    imagesL, imagesR = calibration_images
    for i, (imgL_path, imgR_path) in enumerate(zip(imagesL, imagesR)):
        imgL = cv2.imread(imgL_path)
        imgR = cv2.imread(imgR_path)

        # Undistort images independently
        map1_L, map2_L = cv2.fisheye.initUndistortRectifyMap(K_L, D_L, np.eye(3), K_L, frame_size, cv2.CV_16SC2)
        map1_R, map2_R = cv2.fisheye.initUndistortRectifyMap(K_R, D_R, np.eye(3), K_R, frame_size, cv2.CV_16SC2)

        rectifiedL = cv2.remap(imgL, map1_L, map2_L, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        rectifiedR = cv2.remap(imgR, map1_R, map2_R, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

        # Combine left and right rectified images for visualization (not required, but useful for debugging)
        combined_rect = np.hstack((rectifiedL, rectifiedR))
        cv2.imwrite(os.path.join(debug_dir, f"independent_rectified_{i}.png"), combined_rect)

    return K_L, D_L, K_R, D_R

def stereo_calibration(calibration_images, frame_size, debug_dir, 
                     objpoints, imgpoints_L, imgpoints_R, output_dir,
                     K_L, D_L, K_R, D_R):

    output_dir = f"{output_dir}/stereo"
    os.makedirs(output_dir, exist_ok=True)

    imagesL, imagesR = calibration_images

    # calibration_flags = (
        # cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC 
        # cv2.fisheye.CALIB_CHECK_COND +
        # cv2.fisheye.CALIB_FIX_SKEW
    # )

    # K_L = np.zeros((3, 3), dtype=np.float64)
    # D_L = np.zeros((4, 1), dtype=np.float64)
    # K_R = np.zeros((3, 3), dtype=np.float64)
    # D_R = np.zeros((4, 1), dtype=np.float64) 
    R = np.eye(3, dtype=np.float64)
    T = np.zeros((3, 1), dtype=np.float64)
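    # Initial relative pose (identity rotation, zero translation). With the default flags,
    # cv2.fisheye.stereoCalibrate keeps the supplied intrinsics fixed (CALIB_FIX_INTRINSIC)
    # and only estimates the relative pose R, T.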

    print("objpoints", len(objpoints), len(objpoints[0]), objpoints[0].shape)
    print("imgpoints_L", len(imgpoints_L), len(imgpoints_L[0]), imgpoints_L[0].shape)
    print("imgpoints_R", len(imgpoints_R), len(imgpoints_R[0]), imgpoints_R[0].shape)

    # cv2.fisheye.stereoCalibrate does not return E and F (unlike cv2.stereoCalibrate),
    # so unpack only the documented outputs and derive E and F from R and T afterwards.
    ret, K_L, D_L, K_R, D_R, R, T, *_ = cv2.fisheye.stereoCalibrate(
        objpoints, imgpoints_L, imgpoints_R,
        K_L, D_L, K_R, D_R,
        frame_size, R, T,
        # flags=calibration_flags,
        criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
    )

    T_cross = np.array([[0.0, -T[2, 0], T[1, 0]],
                        [T[2, 0], 0.0, -T[0, 0]],
                        [-T[1, 0], T[0, 0], 0.0]])
    E = T_cross @ R
    F = np.linalg.inv(K_R).T @ E @ np.linalg.inv(K_L)

    print(f"Stereo calibration error: {ret}")

    # Save parameters
    np.save(os.path.join(output_dir, "K_L.npy"), K_L, allow_pickle=False)
    np.save(os.path.join(output_dir, "D_L.npy"), D_L, allow_pickle=False)
    np.save(os.path.join(output_dir, "K_R.npy"), K_R, allow_pickle=False)
    np.save(os.path.join(output_dir, "D_R.npy"), D_R, allow_pickle=False)
    np.save(os.path.join(output_dir, "R.npy"), R, allow_pickle=False)
    np.save(os.path.join(output_dir, "T.npy"), T, allow_pickle=False)
    np.save(os.path.join(output_dir, "E.npy"), E, allow_pickle=False)
    np.save(os.path.join(output_dir, "F.npy"), F, allow_pickle=False)

    # Stereo rectification
    R_L, R_R, P_L, P_R, Q = cv2.fisheye.stereoRectify(
        K_L, D_L, K_R, D_R, frame_size, R, T, 
        flags=cv2.fisheye.CALIB_ZERO_DISPARITY
    )

    xmapL, ymapL = cv2.fisheye.initUndistortRectifyMap(
        K_L, D_L, R_L, P_L, frame_size, cv2.CV_16SC2
    )
    xmapR, ymapR = cv2.fisheye.initUndistortRectifyMap(
        K_R, D_R, R_R, P_R, frame_size, cv2.CV_16SC2
    )

    for i, (imgL_path, imgR_path) in enumerate(zip(imagesL, imagesR)):
        imgL = cv2.imread(imgL_path)
        imgR = cv2.imread(imgR_path)

        rectifiedL = cv2.remap(imgL, xmapL, ymapL, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        rectifiedR = cv2.remap(imgR, xmapR, ymapR, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

        combined_rect = np.hstack((rectifiedL, rectifiedR))
        cv2.imwrite(os.path.join(debug_dir, f"stereo_rectified_{i}.png"), combined_rect)


if __name__ == "__main__":

    # Parameters
    chessboard_size = (6, 7)
    square_size = 42.5

    # charuco_board_size = (5, 7)
    # square_length = 28.5
    # marker_length = 14.6
    # square_length = 30
    # marker_length = 15

    # aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)
    # board = cv2.aruco.CharucoBoard(charuco_board_size, square_length, marker_length, aruco_dict)
    # aruco_params = cv2.aruco.DetectorParameters()

    frame_size = (1280, 720)

    # Input and output dirs
    # images_dir = f"calibration_images/chessboard/big_rect/cb_big_rect"
    # images_dir = f"calibration_images/charuco/opencv/charuco"
    images_dir = "calibration_images/handpicked"
    image_dir_L = os.path.join(images_dir, "img_separated_L")
    image_dir_R = os.path.join(images_dir, "img_separated_R")
    output_dir = "calibration_matrices"
    os.makedirs(output_dir, exist_ok=True)

    # Debug directories
    debug_dir = "debug"
    debug_corners = os.path.join(debug_dir, "corners")
    debug_rectified = os.path.join(debug_dir, "rectified")
    debug_rejected = os.path.join(debug_dir, "rejected")
    debug_charuco = os.path.join(debug_dir, "charuco")
    for d in [debug_corners, debug_rectified, debug_rejected, debug_charuco]:
        os.makedirs(d, exist_ok=True)
    debug_dirs = (debug_corners, debug_rectified, debug_rejected)

    stereo_debug_dir = os.path.join(debug_dir, "stereo")
    os.makedirs(stereo_debug_dir, exist_ok=True)

    # Get image file paths (assumes matching sorted order)
    imagesL = sorted(glob.glob(os.path.join(image_dir_L, "*.png")))
    imagesR = sorted(glob.glob(os.path.join(image_dir_R, "*.png")))

    if len(imagesL) != len(imagesR) or len(imagesL) == 0:
        print("Error: Mismatch in number of left and right images or no images found.")
        sys.exit(1)
        
    valid_imgs_L = imagesL.copy()
    valid_imgs_R = imagesR.copy()
    recalibrate = True

    prev_L_length = len(valid_imgs_L)
    prev_R_length = len(valid_imgs_R)
    print(f"Number of valid images: {len(valid_imgs_L)}")
    print(f"Number of valid images: {len(valid_imgs_R)}")
    
    while recalibrate:
        calibration_images = (valid_imgs_L, valid_imgs_R)
        valid_imgs_L, valid_imgs_R, objpoints, imgpoints_L, imgpoints_R = image_selector(
            calibration_images, frame_size, chessboard_size, square_size, debug_dirs
        )
        # valid_imgs_L, valid_imgs_R, objpoints, imgpoints_L, imgpoints_R = charuco_image_selector(
        #     calibration_images, board, aruco_dict, aruco_params, debug_charuco
        # )

        if (len(valid_imgs_L) != prev_L_length or len(valid_imgs_R) != prev_R_length) and \
        (len(valid_imgs_L) != 0 and len(valid_imgs_R) != 0):
            prev_L_length = len(valid_imgs_L)
            prev_R_length = len(valid_imgs_R)
            print(f"Number of valid images: {len(valid_imgs_L)}")
            print(f"Number of valid images: {len(valid_imgs_R)}")

            print("Recalibrating...")
        else:
            recalibrate = False

    print(f"Accepted {len(valid_imgs_L)} images.")

    print("Running independent calibration...")
    K_L, D_L, K_R, D_R = mono_calibration(
        calibration_images, frame_size, debug_dir, objpoints,
        imgpoints_L, imgpoints_R, output_dir
    )

    print("Running stereo calibration...")
    calibration_images = (valid_imgs_L, valid_imgs_R) 
    stereo_calibration(
        calibration_images, frame_size, stereo_debug_dir, objpoints,
        imgpoints_L, imgpoints_R, output_dir, K_L, D_L, K_R, D_R
    )

The error message tells you what’s wrong. Either your objpoints / imgpoints are empty, or they aren’t the same size. It looks like you print out the size of objpoints and imgpoints_L and imgpoints_R. Are they all the same size?

I didn’t look through your code, but whatever you are doing to filter points needs to make sure you end up with n objpoints, and the n corresponding image points in both L and R. You can’t filter a point in L and not filter the same point in R. At least that’s what it looks like to me…
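To make that concrete, here is a minimal sketch of the kind of sanity check I mean (assuming lists named objpoints, imgpoints_L and imgpoints_R shaped the way the fisheye functions expect, as in your code). Run it right before the calibrate calls:

import numpy as np

def check_calibration_inputs(objpoints, imgpoints_L, imgpoints_R):
    # One entry per accepted view in every list, and at least one view overall
    assert len(objpoints) == len(imgpoints_L) == len(imgpoints_R) > 0, \
        "Empty inputs or a different number of views per list"

    for i, (obj, ptsL, ptsR) in enumerate(zip(objpoints, imgpoints_L, imgpoints_R)):
        obj = np.asarray(obj)
        ptsL = np.asarray(ptsL)
        ptsR = np.asarray(ptsR)
        # Every view must contribute the same number of 3D points and 2D points (L and R)
        assert obj.shape[0] == ptsL.shape[0] == ptsR.shape[0], (
            f"View {i}: {obj.shape[0]} object points vs "
            f"{ptsL.shape[0]} left / {ptsR.shape[0]} right image points"
        )
        # Shapes the fisheye functions accept: (N, 1, 3) object points, (N, 1, 2) image points
        assert obj.shape[1:] == (1, 3), f"View {i}: object points shaped {obj.shape}"
        assert ptsL.shape[1:] == (1, 2) and ptsR.shape[1:] == (1, 2), \
            f"View {i}: image points shaped {ptsL.shape} / {ptsR.shape}"

If one of those assertions fires, you know exactly which view (or which filtering step) dropped points on one side only.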

I managed to fix it. Although it is not exactly what you mentioned, your reply helped me properly understand the error; it was more of a silly bug… Thanks!