Good Afternoon, Thank you both for your reply.
I followed your advice @crackwitz and proceeded to recalibrate the cameras separately, and then in Stereo.
Now I get reprojection errors of 0.57 and 0.59 respectively, using sets of 45 and 55 images, varying the board's angle of inclination with respect to the camera as well as its distance — whereas previously I had calibrated with only 6 images per camera.
Now, I have a problem. I was thinking about putting together this code, where I use charuco to calibrate the cameras in Stereo. The only limitation is that both cameras must observe the same Ids as the other.
The problem is in the result, I don’t know why the image is extruded in the form of a cone, I attach the code and the result in case you know what is wrong.
import cv2,os
import glob
import time
import numpy as np
# Base name used when saving the calibration XML.
map_name="10000000bc3fed61"
# --- ChArUco board definition ---
# rows/cols: number of squares; sqr_size / qr_size: physical square and
# marker sizes (metres here: 10 mm squares, 8 mm markers).
rows=4
cols=4
sqr_size=.01
qr_size=.008
# Corner-refinement termination criteria: 30 iterations or eps 0.001.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)
# NOTE(review): CharucoBoard_create takes (squaresX, squaresY, ...); `rows` is
# passed in the squaresX slot. Harmless while rows == cols — confirm if they differ.
board = cv2.aruco.CharucoBoard_create(rows,cols,sqr_size,qr_size,dictionary)
# Synchronized left/right capture sets (sorted so pairs line up by index).
imagesLeft = sorted(glob.glob(f'/home/admin/CALIBRATION/FOTO_IZQ_CH/*.png'))
imagesRight = sorted(glob.glob(f'/home/admin/CALIBRATION/FOTO_DER_CH/*.png'))
#
# Accumulators filled by the detection loop below.
objpoints=[]
corners_list_L = [] # ChArUco corners, left image
id_list_L = [] # ChArUco IDs, left image
corners_list_R = [] # ChArUco corners, right image
id_list_R = [] # ChArUco IDs, right image
imgpointsR = []
imgpointsL = []
#
cv2.destroyAllWindows()
##TEST
# Detection loop: for every synchronized left/right pair, detect the ArUco
# markers, interpolate the ChArUco chessboard corners, and keep only pairs in
# which BOTH cameras saw the very same board corners.
for images_l, images_r in zip(imagesLeft,imagesRight):
    # Left image points
    img_l = cv2.imread(images_l)
    grayL = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
    corners_L, ids_L, rejected_L = cv2.aruco.detectMarkers(grayL, dictionary)
    resp_L, charuco_corners_L, charucos_ids_L = cv2.aruco.interpolateCornersCharuco(corners_L, ids_L, grayL, board)
    # Right image points
    img_r = cv2.imread(images_r)
    grayR = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
    corners_R, ids_R, rejected_R = cv2.aruco.detectMarkers(grayR, dictionary)
    resp_R, charuco_corners_R, charucos_ids_R = cv2.aruco.interpolateCornersCharuco(corners_R, ids_R, grayR, board)
    # Detections control
    print(resp_R, " : ",resp_L)
    # BUG FIX: the original test `resp_L == resp_R and (resp_L and resp_R) > 1`
    # compared only corner COUNTS. Two views can detect the same number of
    # corners but different corner IDs; in that case `objpoints` (taken from
    # the left view) no longer corresponds to `imgpointsR` and stereoCalibrate
    # is fed broken correspondences. Require the ID lists themselves to match.
    if resp_L > 1 and resp_R > 1 and np.array_equal(charucos_ids_L, charucos_ids_R):
        # Object/image point pairs are only meaningful once IDs match; also
        # avoids calling the helper with None corners on failed detections.
        objpoints_L, imgpoints_L = cv2.aruco.getBoardObjectAndImagePoints(board, charuco_corners_L, charucos_ids_L)
        objpoints_R, imgpoints_R = cv2.aruco.getBoardObjectAndImagePoints(board, charuco_corners_R, charucos_ids_R)
        corners_list_L.append(charuco_corners_L)
        corners_list_R.append(charuco_corners_R)
        id_list_L.append(charucos_ids_L)
        id_list_R.append(charucos_ids_R)
        objpoints.append(objpoints_L)
        imgpointsR.append(imgpoints_R)
        imgpointsL.append(imgpoints_L)
        # Draw and display
        cv2.aruco.drawDetectedCornersCharuco(img_l, charuco_corners_L, charucos_ids_L, (255,0,0))
        cv2.aruco.drawDetectedCornersCharuco(img_r, charuco_corners_R, charucos_ids_R, (255,0,0))
        cv2.imshow('imgL', cv2.resize(img_l,(640,480)))
        cv2.imshow('imgR', cv2.resize(img_r,(640,480)))
        cv2.moveWindow("imgR", 800, 0)
        cv2.waitKey(0)
cv2.destroyAllWindows()
# Load the intrinsic parameters produced by the per-camera calibrations.
cv_file = cv2.FileStorage(f'/home/admin/CALIBRATION/MAPAS/{os.path.basename(imagesLeft[0])[:16]}_CHARUCO.xml', cv2.FILE_STORAGE_READ)
# Small helper: fetch one named matrix from the open storage.
read_mat = lambda node: cv_file.getNode(node).mat()
cameraMatrixL = read_mat('CameraMatrix_L')
cameraMatrixR = read_mat('CameraMatrix_R')
newCameraMatrixL = read_mat('NEWCameraMatrix_L')
newCameraMatrixR = read_mat('NEWCameraMatrix_R')
distL = read_mat('Dist_L')
distR = read_mat('Dist_R')
cv_file.release()
# ---- Stereo calibration -----------------------------------------------------
flags = 0
flags |= cv2.CALIB_USE_INTRINSIC_GUESS
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence intrinsic parameters are the same
criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# BUG FIX: stereoCalibrate's positional parameters after imageSize are
# R, T, E, F, flags, criteria. The original call passed `criteria_stereo, flags`
# positionally, so the criteria tuple landed in the R slot and the flags in the
# T slot — CALIB_USE_INTRINSIC_GUESS was never actually applied. Pass them by
# keyword instead.
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(
    objpoints, imgpointsL, imgpointsR,
    cameraMatrixL, distL, cameraMatrixR, distR,
    grayL.shape[::-1], flags=flags, criteria=criteria_stereo)
# alpha=1: keep every source pixel after rectification (black borders allowed).
rectifyScale= 1
# BUG FIX: stereoRectify's positional parameters after T are the output arrays
# R1, R2, P1, ... — `rectifyScale, (0,0)` did NOT set alpha/newImageSize.
# Pass them by keyword.
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv2.stereoRectify(
    newCameraMatrixL, distL, newCameraMatrixR, distR,
    grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0, 0))
#print(retStereo)
# Precompute the rectification maps once; remap is then cheap per frame.
stereoMapL = cv2.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv2.CV_16SC2)
stereoMapR = cv2.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv2.CV_16SC2)
undistortedL= cv2.remap(img_l, stereoMapL[0], stereoMapL[1], cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
undistortedR= cv2.remap(img_r, stereoMapR[0], stereoMapR[1], cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
# Side-by-side sanity view: after a correct rectification, matching features
# should sit on the same horizontal green line in both halves.
vis = np.concatenate((undistortedL, undistortedR), axis=1)
heigth, width, _ = vis.shape
for i in range(0, 10):
    cv2.line(vis, (0, int(heigth / 10) * i), (width, int(heigth / 10) * i), (0, 255, 0))
cv2.imshow("Result", cv2.resize(vis, (1280, 720)))
cv2.waitKey(0)
And the output is this:
But when I calibrate with Chessboard, I use the following code:
import numpy as np
import cv2 as cv
import glob, os
img_globdir="/home/admin/CALIBRATION"
################ FIND CHESSBOARD CORNERS - OBJECT POINTS AND IMAGE POINTS #############################
#chessboardSize = (26,19) #[row,cols]
chessboardSize = (20,13)
frameSize = (2592,1944) #[width,heigth] — NOTE(review): defined but unused in this script
sqre_size=12.5 #[mm]
# termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Inner-corner count is one less than the square count along each axis.
chessboardSize=(chessboardSize[0]-1,chessboardSize[1]-1)
# Create the board's 3-D object points (all on the z = 0 plane).
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
# The mgrid is scaled below by the board's square size (12.5 mm here).
objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)
objp=objp*sqre_size
# The resulting point map is expressed in the unit given above (millimetres).
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpointsL = [] # 2d points in image plane.
imgpointsR = [] # 2d points in image plane.
imagesLeft = sorted(glob.glob(f'{img_globdir}/FOTO_IZQ/*.png'))
imagesRight = sorted(glob.glob(f'{img_globdir}/FOTO_DER/*.png'))
# Review loop: detect chessboard corners in each synchronized pair, then let
# the operator accept ('y'), reject ('n') or reject-and-delete ('d') the pair.
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    imgL = cv.imread(imgLeft)
    imgR = cv.imread(imgRight)
    grayL = cv.cvtColor(imgL, cv.COLOR_BGR2GRAY )
    grayR = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    retL, cornersL = cv.findChessboardCorners(grayL, chessboardSize, None)
    retR, cornersR = cv.findChessboardCorners(grayR, chessboardSize, None)
    print(retL," : ", retR)
    # If found, add object points, image points (after refining them)
    # IDIOM FIX: the original `retL and retR == True` parses as
    # `retL and (retR == True)` and only worked because retR is a bool.
    if retL and retR:
        # Draw and display the corners
        cv.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
        cv.imshow('img left', cv.resize(imgL,(800,450)))
        cv.drawChessboardCorners(imgR, chessboardSize, cornersR, retR)
        cv.imshow('img right', cv.resize(imgR,(800,450)))
        # Block until the operator decides what to do with this pair.
        while 1:
            k=cv.waitKey(33)
            if k == ord('y'): # accept the pair
                valid=True
                break
            if k== ord('n'): # reject the pair (files kept on disk)
                valid=False
                break
            if k == ord('d'): # reject AND delete the image files
                valid = False
                print("Borradas")
                os.remove(imgLeft)
                os.remove(imgRight)
                break
        if valid:
            print("Validamos")
            objpoints.append(objp)
            # Sub-pixel refinement before storing the image points.
            cornersL = cv.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
            imgpointsL.append(cornersL)
            cornersR = cv.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
            imgpointsR.append(cornersR)
    else:
        # Corners not found in at least one view: the pair is unusable.
        os.remove(imgLeft)
        os.remove(imgRight)
cv.destroyAllWindows()
############## CALIBRATION #######################################################
# Load the per-camera intrinsics saved by the individual calibrations.
cv_file = cv.FileStorage(f'/home/admin/CALIBRATION/DOCUMENTOS/MAPAS/{os.path.basename(imagesLeft[0])[:16]}_CHARUCO.xml', cv.FILE_STORAGE_READ)
cameraMatrixL = cv_file.getNode('CameraMatrix_L').mat()
cameraMatrixR = cv_file.getNode('CameraMatrix_R').mat()
newCameraMatrixL = cv_file.getNode('NEWCameraMatrix_L').mat()
newCameraMatrixR = cv_file.getNode('NEWCameraMatrix_R').mat()
distL=cv_file.getNode('Dist_L').mat()
distR=cv_file.getNode('Dist_R').mat()
cv_file.release()
########## Stereo Vision Calibration #############################################
flags = 0
flags |= cv.CALIB_USE_INTRINSIC_GUESS
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence intrinsic parameters are the same
criteria_stereo= (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# (Translated) FROM HERE ON something was going wrong; an asymmetric circular
# pattern was suggested as an alternative to try.
# This step estimates the transformation between the two cameras and the
# Essential and Fundamental matrices.
# BUG FIX: stereoCalibrate's positional parameters after imageSize are
# R, T, E, F, flags, criteria — passing `criteria_stereo, flags` positionally
# put them in the R/T slots, so CALIB_USE_INTRINSIC_GUESS never took effect.
# Pass both by keyword.
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(
    objpoints, imgpointsL, imgpointsR,
    newCameraMatrixL, distL, newCameraMatrixR, distR,
    grayL.shape[::-1], flags=flags, criteria=criteria_stereo)
print(retStereo)
########## Stereo Rectification #################################################
# alpha=1: keep every source pixel after rectification (black borders allowed).
rectifyScale= 1
# BUG FIX: the positional parameters after T are the output arrays R1, R2, ...
# `rectifyScale, (0,0)` did not set alpha/newImageSize — use keywords.
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv.stereoRectify(
    newCameraMatrixL, distL, newCameraMatrixR, distR,
    grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0, 0))
stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv.CV_16SC2)
stereoMapR = cv.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv.CV_16SC2)
undistortedL= cv.remap(imgL, stereoMapL[0], stereoMapL[1], cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)
undistortedR= cv.remap(imgR, stereoMapR[0], stereoMapR[1], cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)
# Side-by-side sanity view: matching features should sit on the same
# horizontal green line in both halves after a correct rectification.
vis = np.concatenate((undistortedL, undistortedR), axis=1)
heigth, width, _ = vis.shape
for i in range(0, 10):
    cv.line(vis, (0, int(heigth / 10) * i), (width, int(heigth / 10) * i), (0, 255, 0))
cv.imshow("CON", cv.resize(vis, (1280, 720)))
cv.waitKey(0)
and i get the following:
If you know what I’m doing wrong when calibrating the Stereo with charuco, I would appreciate it.
Anyway, I saved the .XML obtained from the stereo calibration with the chessboard and recalculated the disparity map with the same images and the values that @Sencis recommended, but I couldn’t raise numDisparities to 256 since the image went completely black, so I currently leave it at 60.
The resulting disparity map is this:
It improved considerably, but I think the problem lies in the calibration: increasing the number of images in each individual calibration visibly improved the colors of the disparity map — at least the background is no longer white, and the result now follows a consistent logic.
But I would like to improve the stereo calibration with ChArUco; I don’t know why the result is so bad — even though I use more than 14 images, the result is still very poor.
Thank you both for sharing your knowledge!!!