Unable to rectify stereo cameras correctly

Good afternoon. I am trying to calibrate a stereo camera, model ELP-1080P2CAM-l28 (http://www.elpcctv.com/2mp-ar0330-uvc-otg-binocular-stereo-vision-camera-module-with-28mm-lens-for-ar-augmented-reality-p-252.html)

After calibrating the cameras with these images, I generate a disparity map, store it, and later use it to rectify the images with cv2.remap()

The problem is that the cv2.remap() function usually returns completely black images.

This is the code I use to calibrate the cameras and store the stereo maps:

import numpy as np
import cv2 as cv
import glob, os

img_globdir="/home/admin/CALIBRATION"

################ FIND CHESSBOARD CORNERS - OBJECT POINTS AND IMAGE POINTS #############################

chessboardSize = (9,6)
frameSize = (320,240)



# termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)


# Create the object points in 3 dimensions
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)

# Multiply the mgrid result by the size of the board squares (22 mm in this case)

objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)

objp=objp*22

# This returns a map of points in the unit given above, in this case millimetres.

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpointsL = [] # 2d points in image plane.
imgpointsR = [] # 2d points in image plane.


imagesLeft = sorted(glob.glob(f'{img_globdir}/FOTO_IZQ/*.png'))
imagesRight = sorted(glob.glob(f'{img_globdir}/FOTO_DER/*.png'))
#name_map=os.path.basename(imagesRight[0])[:16]
name_map="10000000bc3fed61"
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    # Sanity check that the left/right filenames form a matching pair
    if imgLeft == imgRight.replace("DER", "IZQ"):
        print(True)
    imgL = cv.imread(imgLeft)
    imgR = cv.imread(imgRight)
    grayL = cv.cvtColor(imgL, cv.COLOR_BGR2GRAY )
    grayR = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)

    # Find the chess board corners
    retL, cornersL = cv.findChessboardCorners(grayL, chessboardSize, None)
    retR, cornersR = cv.findChessboardCorners(grayR, chessboardSize, None)

    # If found, add object points, image points (after refining them)
    if retL and retR:

        objpoints.append(objp)

        cornersL = cv.cornerSubPix(grayL, cornersL, (11,11), (-1,-1), criteria)
        imgpointsL.append(cornersL)

        cornersR = cv.cornerSubPix(grayR, cornersR, (11,11), (-1,-1), criteria)
        imgpointsR.append(cornersR)

        # Draw and display the corners
        cv.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
        cv.imshow('img left', cv.resize(imgL,(640,480)))
        cv.drawChessboardCorners(imgR, chessboardSize, cornersR, retR)
        cv.imshow('img right', cv.resize(imgR,(640,480)))
        cv.waitKey(0)
    else:
        print("Imagen",imgLeft)


cv.destroyAllWindows()




############## CALIBRATION #######################################################

# NOTE: frameSize must match the actual resolution of the calibration images
retL, cameraMatrixL, distL, rvecsL, tvecsL = cv.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))

retR, cameraMatrixR, distR, rvecsR, tvecsR = cv.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))

print(imgL.shape, " : ",imgR.shape)


########## Stereo Vision Calibration #############################################

flags = 0
flags |= cv.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence intrinsic parameters are the same

criteria_stereo= (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# FROM HERE ON, SOMETHING GOES WRONG FOR US. I would try an asymmetric circular pattern and see if that changes anything.
# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria=criteria_stereo, flags=flags)


########## Stereo Rectification #################################################

rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0,0))
for elem in Q:
    print(np.round(elem))

stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv.CV_16SC2)
stereoMapR = cv.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv.CV_16SC2)


print("Saving parameters!")
if not os.path.exists(f'{img_globdir}/MAPAS'):
    os.mkdir(f'{img_globdir}/MAPAS')
cv_file = cv.FileStorage(f'{img_globdir}/MAPAS/{name_map}.xml', cv.FILE_STORAGE_WRITE)

cv_file.write('stereoMapL_x',stereoMapL[0])
cv_file.write('stereoMapL_y',stereoMapL[1])
cv_file.write('stereoMapR_x',stereoMapR[0])
cv_file.write('stereoMapR_y',stereoMapR[1])

cv_file.release()


With this other script, I undistort the images:

import sys
import numpy as np
import time
import imutils
import cv2

# Camera parameters to undistort and rectify images
cv_file = cv2.FileStorage()
# Open the maps written by the calibration script (adjust the path to match)
cv_file.open('stereoMap.xml', cv2.FILE_STORAGE_READ)

stereoMapL_x = cv_file.getNode('stereoMapL_x').mat()
stereoMapL_y = cv_file.getNode('stereoMapL_y').mat()
stereoMapR_x = cv_file.getNode('stereoMapR_x').mat()
stereoMapR_y = cv_file.getNode('stereoMapR_y').mat()


def undistortRectify(frameR, frameL):

    # Undistort and rectify images
    undistortedL = cv2.remap(frameL, stereoMapL_x, stereoMapL_y, cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    undistortedR = cv2.remap(frameR, stereoMapR_x, stereoMapR_y, cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT, borderValue=0)


    return undistortedR, undistortedL
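
For reference, this is roughly how I exercise it; a minimal sketch, where 'left.png' / 'right.png' are placeholder file names:

frameL = cv2.imread('left.png')
frameR = cv2.imread('right.png')
rectR, rectL = undistortRectify(frameR, frameL)

# If the maps are wrong, almost every pixel samples from outside the source
# image and the output comes back black. Counting non-black pixels shows this.
print("non-black L:", cv2.countNonZero(cv2.cvtColor(rectL, cv2.COLOR_BGR2GRAY)))
print("non-black R:", cv2.countNonZero(cv2.cvtColor(rectR, cv2.COLOR_BGR2GRAY)))

# The x-coordinates stored in the map should mostly fall inside [0, width);
# a range far outside that means the rectification transform itself is off.
print("map L x range:", stereoMapL_x[..., 0].min(), stereoMapL_x[..., 0].max())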

The images I took from the cameras are the following:

https://ibb.co/NWR9LWw
https://ibb.co/8zZtFd8
https://ibb.co/Syr1Ngq
https://ibb.co/CwhB4XX
https://ibb.co/hgbQWs7
https://ibb.co/6D9ShPQ
https://ibb.co/QDfqW11

I don’t know what I’m doing wrong. I have already tried changing the resolution of the images, using larger patterns, and even setting the cameras 1.5 meters from a flat table and taking new images, but even so, rectification always returns black images.

I suppose that I am making some error when rectifying, since I downloaded the images that this user took and the rectification came out quite well (ComputerVision/StereoVisionDepthEstimation/images at master · niconielsen32/ComputerVision · GitHub).

EDIT: as requested in the comments, I attach the undistortion maps in the following link:

Thanks a lot


disparity map & undistortion maps are completely different things !

unfortunately, you don’t show that, only the collection of chessboard images / points.
please update, so we can see how stereoMapL_x etc. are generated

I’m sorry, you’re right!! My mistake, I meant undistortion maps. I’ll edit the post and upload these.

i didn’t mean the actual maps, but the calibration part of the code
(but thanks, anyway !)

Ok, I edited that too; I put up the complete code!


well, i took a look at your images, and (as always) – you can’t make gold from shite :wink:

  • you need an absolutely flat board (not a floppy piece of paper; glue it to something rigid)
  • the white zone has to be completely visible
  • more variation in pose / angle
  • see that you have the whole fov covered, esp. the corners !
  • maybe allow for more distance (don’t simply lay it on the table)
  • make sure you have enough valid pairs (20+)

try to get the error returned from stereoCalibrate under 0.5.
if necessary, throw out bad image pairs and take new ones.
rinse & repeat.

again, calibrating stereo cams can be the most painful / frustrating thing, don’t give up !

Thanks for your answer, berak! I’ll start over again tomorrow following your advice. When I have something I will post again! Thanks for your time and your help!!

Good afternoon, berak!! I have followed your advice and took 142 pairs of images in a day. I no longer get all black; now I get a much more consistent result. Still, checking each image one by one, I don’t understand why the image is cut off in the undistortion output.

In any case, it is clear that I still have to keep filtering and taking more images, but I cannot understand why it is cropped. I attach the two test input images and the undistorted output images from my code.

As can be seen in the last image, the straight lines line up almost perfectly on the left side, not so much on the right, but I suppose it is a matter of continued testing. The images also have a kind of “zoom” in the chessboard area.

Thank you very much for your help and your patience!

I’ve seen this “effect” before as well. I think it’s estimating some translation in Z for those cameras, when there isn’t one.

the calibration routines in OpenCV frustrate me too. awful documentation of the whole process, lots of magic, impossible to get a sense for what one did right and what needs improvement…

that is to say, if this frustrates you, it’s not necessarily your fault, and you are not alone.

You’re right, it certainly happens to many people. But I have found a great improvement by performing the calibration with a much larger pattern. Use the OpenCV “gen_pattern.py” tool and generate a chessboard with 28 rows and 17 columns. Even though I calibrated with only 22 pairs of images, my result is now the following

Of course it is still warped at the corners, but it doesn’t zoom in as much as it used to. I guess I need to keep trying. I suspect that larger patterns provide a greater number of reference points, which helps a lot, but higher-resolution cameras would be needed (mine are 1920 x 1080), since when I tried to use a 41 x 25 pattern it could not detect the corners of the squares. After this I will try to acquire 3840x2880 cameras to see if the calibration is indeed better.

I also read somewhere that circular patterns provide greater precision when calibrating.

One last question that I would like to clear up: as the images stand, the points coincide in both the left and right cameras, so if I wanted to measure the depth of an object, I should only need the formula

Z = (f*b)/disparity

where “f” and “b” are obtained from the matrix Q returned by cv.stereoRectify(), and the disparity would be the difference between the centers of the bounding boxes of my detection in both images.

Now, is it better to take those parameters directly from the Q matrix, or to hard-code them from the known parameters of the camera?
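
For future readers, here is a rough sketch of pulling f and b out of Q (sign conventions vary between setups, so double-check against your own Q; xL and xR are hypothetical pixel x-coordinates of the matched detections):

# Q from cv.stereoRectify has the form:
# [[1, 0,  0,    -cx],
#  [0, 1,  0,    -cy],
#  [0, 0,  0,      f],
#  [0, 0, -1/Tx, (cx - cx')/Tx]]
f = Q[2, 3]               # focal length in pixels
b = 1.0 / abs(Q[3, 2])    # baseline, in the units of the object points (mm here)

disparity = xL - xR       # e.g. difference between bounding-box centers, in pixels
Z = f * b / disparity     # depth in the same units as the baseline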

First of all, thank you both for guiding me and helping me, I hope this post will be useful for future readers.

try charuco. those patterns you don’t have to keep entirely in view. that allows you to get points in the corners of the image.


I’m noticing a lot of residual distortion in that last image pair. I also notice that features line up on some of the lines, but not others. For example, the line that intersects the top of the table leg in the left image seems quite a bit off in the right image.

What reprojection error value are you getting when you calibrate the camera? (I would focus on getting intrinsic calibration for each camera as an independent “one time” process. Once you have the intrinsics well calibrated you can focus only on calibrating the extrinsics / relative pose of the pair.)

I agree with crackwitz’s suggestion to use a Charuco pattern so that you get more calibration points closer to the image edges/corners - this will help a lot with your distortion model. I also suggest using the 8 parameter (rational) model by using the flag CALIB_RATIONAL_MODEL. (I get much better results with that)
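
In code, that’s just an extra flag (a sketch, assuming whatever calibrateCamera inputs you already have):

flags = cv2.CALIB_RATIONAL_MODEL  # enables k4..k6 for the 8-parameter model
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, imageSize, None, None, flags=flags)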

In addition to looking at the calibration score, I undistort the image and draw lines on it that coincide with the chessboard pattern. I find it helpful to look at how closely the lines match up with the chessboard image, particularly further out (near the corners) of the image. I also inspect the vanishing point / intersection of the lines. High quality calibrations result in a very tight intersection radius, lower quality results have a much larger “intersection” point. I have found that the score doesn’t always give a good indication of calibration results - particularly if you end up filtering a lot of points or otherwise don’t use points near the corners of the image.
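
Roughly, that check looks like this; a sketch only, where img, K, dist and corners are whatever your calibration and findChessboardCorners produced, and cols is an assumed number of inner corners per row:

import cv2
import numpy as np

undist = cv2.undistort(img, K, dist)
# Map the detected corners into the undistorted image (P=K keeps pixel units)
pts = cv2.undistortPoints(corners, K, dist, P=K).reshape(-1, 2)

cols = 9  # inner corners per chessboard row (assumption)
for r in range(len(pts) // cols):
    row = pts[r * cols:(r + 1) * cols].astype(np.float32)
    vx, vy, x0, y0 = cv2.fitLine(row, cv2.DIST_L2, 0, 0.01, 0.01).ravel()
    p1 = (int(x0 - 2000 * vx), int(y0 - 2000 * vy))
    p2 = (int(x0 + 2000 * vx), int(y0 + 2000 * vy))
    cv2.line(undist, p1, p2, (0, 255, 0), 1)
# Rows that hug their fitted lines, even near the image corners, indicate a
# good distortion model; visibly bowed rows mean residual distortion.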

I have included an example of a distorted / undistorted (with lines drawn) image to show what I mean.

For reference, these images are from a calibration that included 15 input images and had an RMS error of about 0.15.

Note that with a lens with this much distortion, the Charuco calibration process doesn’t do a great job finding points in the highly distorted area on its own. This could be related to the size of the markers I’m using, which don’t get detected very well toward the edges. I iteratively add points (predict locations, find with cornerSubPix) in order to use all of the points in the image. In other words, Charuco calibration can unlock better results, but you will probably have to do some additional work for best results.


Good morning, thanks to you both for your answers. Did you build a machine to calibrate? I’m impressed. I don’t know how to measure the reprojection error yet, but I’ll get to that soon and give you an answer as soon as I have it. I actually followed your advice and went straight to the OpenCV documentation to look at the charuco board.

I’m trying to calibrate again with the charuco board, but it seems that I’m putting together the objpoints wrong; the OpenCV documentation doesn’t talk about it, and I didn’t find an answer on the internet either.

Looking at the code, I realize that when one camera does not detect the same markers or corners as the other, the number of elements differs between “imgpointsL” and “imgpointsR”. Now, if I use the charuco board precisely so that it covers the corners of my cameras’ views, the two will never see exactly the same points. How can I solve this?

I guess the problem is in this part:

if len(resL[0]) > 0 and len(resR[0]) > 0:
    # Interpolate the Charuco board corners
    _, cornersL, idsL = cv2.aruco.interpolateCornersCharuco(resL[0], resL[1], grayL, board)
    _, cornersR, idsR = cv2.aruco.interpolateCornersCharuco(resR[0], resR[1], grayR, board)

    # Check validity before using the corners (interpolation can return None)
    if cornersL is not None and cornersR is not None and idsL is not None and idsR is not None and len(cornersL) > 3 and len(cornersR) > 3:
        cornersL = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
        cornersR = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)

        objpoints_L, imgpoints_L = cv2.aruco.getBoardObjectAndImagePoints(board, cornersL, idsL)
        objpoints_R, imgpoints_R = cv2.aruco.getBoardObjectAndImagePoints(board, cornersR, idsR)

        # If successful, store the results
        print("append")
        allCornersL.append(cornersL)
        allIdsL.append(idsL)
        objpoints.append(objpoints_L)
        imgpointsL.append(imgpoints_L)

        allCornersR.append(cornersR)
        allIdsR.append(idsR)
        imgpointsR.append(imgpoints_R)
thank you again!!

I guess you could go through both lists of points and only keep what was seen in both cameras. I haven’t done much with stereo calibration, so I’m not really sure how Charuco plays with that.

However, I believe that you can calibrate the intrinsics separately from the relative pose. That is what I would do, but I’m not certain it’s the better approach, it just seems better to me.

Here is what I would do:

  1. Calibrate camera 1 intrinsics with Charuco board.
  2. Calibrate camera 2 intrinsics with Charuco board.

Once you are satisfied with the intrinsic calibration results, save the camera matrix and distortion coefficients and use them in the future. I’m suggesting doing this as a separate one-time step so you can focus on getting good results - you may not use the same calibration target / input images for your stereo calibration.

  3. Calibrate them as a stereo pair, making sure to pass in the intrinsics you got from steps 1 and 2, and also being sure to pass in the CALIB_FIX_INTRINSIC flag.

I’m a little bit out of my depth, though. You might be able to get great results by calibrating the intrinsics and stereo pose at the same time. In any case if the stereoCalibrate function wants the same points for both cameras, you will have to take care of that somehow.
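
If it helps, steps 1 and 2 might look roughly like this with the aruco module. This is an untested sketch: the board geometry, dictionary, and image path are all assumptions you would replace with your own.

import cv2
import glob

# Hypothetical board: 9x9 squares, 30 mm squares, 22 mm markers
dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_5X5_100)
board = cv2.aruco.CharucoBoard_create(9, 9, 0.030, 0.022, dictionary)

allCorners, allIds = [], []
for fname in sorted(glob.glob('intrinsics_cam1/*.png')):  # placeholder path
    gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
    markerCorners, markerIds, _ = cv2.aruco.detectMarkers(gray, dictionary)
    if markerIds is not None and len(markerIds) > 0:
        n, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(
            markerCorners, markerIds, gray, board)
        if charucoCorners is not None and n > 3:
            allCorners.append(charucoCorners)
            allIds.append(charucoIds)

# Intrinsics for this one camera; repeat for the other camera, then pass both
# results to stereoCalibrate together with CALIB_FIX_INTRINSIC.
rms, K, dist, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
    allCorners, allIds, board, gray.shape[::-1], None, None)
print("RMS:", rms)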

Good luck.

Thank you very much for your answer. Indeed, I was able to calibrate the cameras correctly and separately, and store their results.

Honestly, it is a joy to put this post together, since I did not find any information on how to calibrate stereo cameras with charuco.

But I have a problem that repeats itself whether I calibrate them separately or at the same time.

The cv2.stereoCalibrate() function requires that the lengths of imgPointsL and imgPointsR be equal, and that each corresponds to the same objPoints.

This means that, even if I calibrate the cameras separately, I still need to pass as an argument ONLY the objPoints detected by both cameras at the same time. So I end up calibrating them together anyway.

My question is: how do I generate those objPoints for stereo calibration? Using the chessboard, it was like this:

chessboardSize = (27,16)

objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)

And in each pair of images where the corners were detected:

objpoints.append(objp)

So I copied this part and replaced the size of the chessboard with that of the charuco board, but clearly it didn’t work.

But before giving up, I did some research and noticed that objPoints can be built from the “ids” that match in both cameras. So I wrote code to filter the matching ones and their respective imgPoints:

cornersL = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
cornersR = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)

# Find common points
objPtsA, imgPtsA = cv2.aruco.getBoardObjectAndImagePoints(board, cornersL, idsL)
objPtsB, imgPtsB = cv2.aruco.getBoardObjectAndImagePoints(board, cornersR, idsR)

# Create a dictionary for each frame: object point -> image point
ptsA = {tuple(a): tuple(b) for a, b in zip(objPtsA[:, 0], imgPtsA[:, 0])}
ptsB = {tuple(a): tuple(b) for a, b in zip(objPtsB[:, 0], imgPtsB[:, 0])}
common = set(ptsA.keys()) & set(ptsB.keys())  # intersection of the object points

for objP in common:
    objectPoints.append(np.reshape(objP, (1, 3)))
    imagePointsL.append(np.reshape(ptsA[objP], (1, 2)))
    imagePointsR.append(np.reshape(ptsB[objP], (1, 2)))

The problem is in the parameters that I pass to the cv2.stereoCalibrate() function. I attach the following link with the arguments objectPoints, imagePointsL, imagePointsR:

https://easyupload.io/c7dfau

and here I attach the only pair of images that I am using for this test (9x9 board).

Perhaps someone reading this post will spot what is wrong.

The error it throws is the following:

cv2.stereoCalibrate(objectPoints, imagePointsL, imagePointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, (widthL ,heightL),criteria_stereo, flags)
cv2.error: OpenCV(4.5.5) /io/opencv/modules/calib3d/src/calibration.cpp:1088: error: (-215:Assertion failed) (count >= 4) || (count == 3 && useExtrinsicGuess) in function 'cvFindExtrinsicCameraParams2'

Thank you

I’m not looking at the same version of the source code as you are, but it appears that count is the number of objectPoints being passed in, so I’d check how many points in common you find.

As I understand it, if you are providing the intrinsics, you should only need a single image to get your relative pose. You might get better results with more images, but I’d start with a single image where both cameras see the whole pattern. (To me, one advantage of calibrating the intrinsics separately is that getting the relative pose is easier.)

I don’t have a lot of experience here, though.
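
One more thing I’d double-check: stereoCalibrate wants one array of points per image pair, not one list entry per point. Appending point-by-point, as in your loop, makes every “view” contain a single point, which would trip the (count >= 4) assertion. A rough sketch of packaging the common points per pair, reusing the names from your snippet:

common = sorted(common)  # fix an ordering so all three lists line up
if len(common) >= 4:     # stereoCalibrate needs at least 4 points per view
    objectPoints.append(np.array(common, dtype=np.float32).reshape(-1, 1, 3))
    imagePointsL.append(np.array([ptsA[p] for p in common], dtype=np.float32).reshape(-1, 1, 2))
    imagePointsR.append(np.array([ptsB[p] for p in common], dtype=np.float32).reshape(-1, 1, 2))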

Same problem here. I have been trying the various techniques discussed in the replies.