Good afternoon, I am trying to calibrate a stereo camera, model ELP-1080P2CAM-l28 (http://www.elpcctv.com/2mp-ar0330-uvc-otg-binocular-stereo-vision-camera-module-with-28mm-lens-for-ar-augmented-reality-p-252.html)
After calibrating the cameras with the chessboard images, I generate the stereo rectification maps, store them, and later use them to rectify the images with cv2.remap().
The problem is that cv2.remap() usually returns completely black images.
This is the code I use to calibrate the cameras and store the stereo maps:
import numpy as np
import cv2 as cv
import glob, os
img_globdir="/home/admin/CALIBRATION"
################ FIND CHESSBOARD CORNERS - OBJECT POINTS AND IMAGE POINTS #############################
chessboardSize = (9,6)
frameSize = (320,240)
# termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Create the 3D object points for the chessboard corners
objp = np.zeros((chessboardSize[0] * chessboardSize[1], 3), np.float32)
# Multiply the mgrid output by the size of the board squares (22 mm in this case)
objp[:,:2] = np.mgrid[0:chessboardSize[0],0:chessboardSize[1]].T.reshape(-1,2)
objp=objp*22
# This returns the object points in the units given above, in this case millimetres.
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpointsL = [] # 2d points in image plane.
imgpointsR = [] # 2d points in image plane.
imagesLeft = sorted(glob.glob(f'{img_globdir}/FOTO_IZQ/*.png'))
imagesRight = sorted(glob.glob(f'{img_globdir}/FOTO_DER/*.png'))
#name_map=os.path.basename(imagesRight[0])[:16]
name_map="10000000bc3fed61"
for imgLeft, imgRight in zip(imagesLeft, imagesRight):
    if imgLeft == imgRight.replace("DER", "IZQ"):
        print(True)
    imgL = cv.imread(imgLeft)
    imgR = cv.imread(imgRight)
    grayL = cv.cvtColor(imgL, cv.COLOR_BGR2GRAY)
    grayR = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)
    # Find the chessboard corners
    retL, cornersL = cv.findChessboardCorners(grayL, chessboardSize, None)
    retR, cornersR = cv.findChessboardCorners(grayR, chessboardSize, None)
    # If found in both images, add object points and refined image points
    if retL and retR:
        objpoints.append(objp)
        cornersL = cv.cornerSubPix(grayL, cornersL, (11,11), (-1,-1), criteria)
        imgpointsL.append(cornersL)
        cornersR = cv.cornerSubPix(grayR, cornersR, (11,11), (-1,-1), criteria)
        imgpointsR.append(cornersR)
        # Draw and display the corners
        cv.drawChessboardCorners(imgL, chessboardSize, cornersL, retL)
        cv.imshow('img left', cv.resize(imgL, (640,480)))
        cv.drawChessboardCorners(imgR, chessboardSize, cornersR, retR)
        cv.imshow('img right', cv.resize(imgR, (640,480)))
        cv.waitKey(0)
    else:
        print("Image", imgLeft)
cv.destroyAllWindows()
############## CALIBRATION #######################################################
retL, cameraMatrixL, distL, rvecsL, tvecsL = cv.calibrateCamera(objpoints, imgpointsL, frameSize, None, None)
heightL, widthL, channelsL = imgL.shape
newCameraMatrixL, roi_L = cv.getOptimalNewCameraMatrix(cameraMatrixL, distL, (widthL, heightL), 1, (widthL, heightL))
retR, cameraMatrixR, distR, rvecsR, tvecsR = cv.calibrateCamera(objpoints, imgpointsR, frameSize, None, None)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))
print(imgL.shape, " : ",imgR.shape)
########## Stereo Vision Calibration #############################################
flags = 0
flags |= cv.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence intrinsic parameters are the same
criteria_stereo= (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# FROM HERE ON, SOMETHING SEEMS TO GO WRONG. I would try an asymmetric circular pattern and see if that changes anything.
# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)
########## Stereo Rectification #################################################
rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R= cv.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, rectifyScale,(0,0))
for elem in Q:
    print(np.round(elem))
stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv.CV_16SC2)
stereoMapR = cv.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv.CV_16SC2)
print("Saving parameters!")
if not os.path.exists(f'{img_globdir}/MAPAS'):
    os.mkdir(f'{img_globdir}/MAPAS')
cv_file = cv.FileStorage(f'{img_globdir}/MAPAS/{name_map}.xml', cv.FILE_STORAGE_WRITE)
cv_file.write('stereoMapL_x',stereoMapL[0])
cv_file.write('stereoMapL_y',stereoMapL[1])
cv_file.write('stereoMapR_x',stereoMapR[0])
cv_file.write('stereoMapR_y',stereoMapR[1])
cv_file.release()
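Right after saving the maps I also run this small sanity check (just a sketch that reuses the variables from the script above; with CV_16SC2 the first map holds the integer (x, y) source coordinates, so if most of them fall outside the frame the remapped image comes out black):
# Appended right after cv_file.release() as a sanity check on the maps
map_xy = stereoMapL[0]            # CV_16SC2: shape (h, w, 2), int16 source coordinates
h, w = map_xy.shape[:2]           # assumes the source frames have the same size as the map
xs, ys = map_xy[..., 0], map_xy[..., 1]
inside = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
print("map size:", map_xy.shape, "dtype:", map_xy.dtype)
print("fraction of destination pixels that sample inside the frame:", inside.mean())
# RMS reprojection errors reported by the calibration calls above
print("RMS errors - left:", retL, "right:", retR, "stereo:", retStereo)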
With this other script, I undistort and rectify the images:
import sys
import numpy as np
import time
import imutils
import cv2
# Camera parameters to undistort and rectify images
cv_file = cv2.FileStorage()
cv_file.open('stereoMap.xml', cv2.FileStorage_READ)
stereoMapL_x = cv_file.getNode('stereoMapL_x').mat()
stereoMapL_y = cv_file.getNode('stereoMapL_y').mat()
stereoMapR_x = cv_file.getNode('stereoMapR_x').mat()
stereoMapR_y = cv_file.getNode('stereoMapR_y').mat()
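Right after loading, I also print what came out of the file, just to make sure none of the nodes is empty (sketch):
# Quick check that the maps were actually loaded from the file
for name, m in (("stereoMapL_x", stereoMapL_x), ("stereoMapL_y", stereoMapL_y),
                ("stereoMapR_x", stereoMapR_x), ("stereoMapR_y", stereoMapR_y)):
    if m is None:
        print(name, "was not found in the file!")
    else:
        print(name, m.shape, m.dtype)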
def undistortRectify(frameR, frameL):
    # Undistort and rectify images
    undistortedL = cv2.remap(frameL, stereoMapL_x, stereoMapL_y, cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
    undistortedR = cv2.remap(frameR, stereoMapR_x, stereoMapR_y, cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
    return undistortedR, undistortedL
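For completeness, this is roughly how I call the function and inspect the result (the image paths are just placeholders for one of the captured pairs):
# Example call (placeholder file names for one captured left/right pair)
frameL = cv2.imread('left_test.png')
frameR = cv2.imread('right_test.png')
rectR, rectL = undistortRectify(frameR, frameL)

# Put the rectified views side by side and draw horizontal lines;
# after a good rectification the same feature should sit on the same row in both halves.
both = np.hstack((rectL, rectR))
for y in range(0, both.shape[0], 40):
    cv2.line(both, (0, y), (both.shape[1] - 1, y), (0, 255, 0), 1)
cv2.imshow('rectified pair', both)
cv2.waitKey(0)
cv2.destroyAllWindows()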
The images I took from the cameras are the following:
https://ibb.co/NWR9LWw
https://ibb.co/8zZtFd8
https://ibb.co/Syr1Ngq
https://ibb.co/CwhB4XX
https://ibb.co/hgbQWs7
https://ibb.co/6D9ShPQ
https://ibb.co/QDfqW11
I don't know what I'm doing wrong. I have already tried changing the resolution of the images, using larger patterns, and even placing the cameras 1.5 meters from a flat table and capturing new images, but when I rectify them it still always returns black images.
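In case it helps, this is how I check that the remapped frames really come out empty (a small sketch reusing frameL/frameR and undistortRectify from the snippet above):
# Count non-black pixels in the rectified output
rectR, rectL = undistortRectify(frameR, frameL)
for name, img in (("left", rectL), ("right", rectR)):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print(name, "non-zero pixels:", cv2.countNonZero(gray), "of", gray.size)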
I suppose I am making some error when rectifying, because when I downloaded the images that this user captured, the rectification came out quite well (ComputerVision/StereoVisionDepthEstimation/images at master · niconielsen32/ComputerVision · GitHub).
EDIT: as requested in the comments, I attach the undistortion maps at the following link:
Thanks a lot