White disparity map after rectification

Hi Forum,

I'm working on a school project that uses two FLIR Lepton cameras (160x120) to create a disparity map. In the program I wrote, I display the disparity map before calibrating and rectifying, and it looks roughly the way it should, apart from a black bar at one side. However, after calibration and rectification the map becomes pure white, with the same black bar at the side. Any ideas what I'm doing wrong?

This is what I did to generate the disparity map (a rough sketch of these steps in plain OpenCV calls follows the list):

  1. Set the camera paths, then rotate, crop and colourise the images
  2. Find the individual camera parameters for calibration from a few captured photos
  3. Use those individual camera parameters for stereo calibration and rectification
  4. Create the disparity map
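
To make it clearer what I mean by each step, here is a rough, generic sketch of the pipeline in plain OpenCV calls (this is not my actual code, just the call order; the function name and all of its inputs are placeholders):

import cv2
import numpy as np


def stereo_pipeline_sketch(objpoints, imgpointsL, imgpointsR, left_gray, right_gray, w, h):
    # 1. Mono calibration of each camera (objpoints/imgpoints* come from findChessboardCorners
    #    on the saved calibration pairs; (w, h) is the image size)
    _, mtxL, distL, _, _ = cv2.calibrateCamera(objpoints, imgpointsL, (w, h), None, None)
    _, mtxR, distR, _, _ = cv2.calibrateCamera(objpoints, imgpointsR, (w, h), None, None)

    # 2. Stereo calibration, reusing the mono intrinsics as the starting guess
    flags = cv2.CALIB_USE_INTRINSIC_GUESS
    ret, mtxL, distL, mtxR, distR, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR, (w, h), flags=flags)

    # 3. Rectification maps (OpenCV size arguments are (width, height))
    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
        mtxL, distL, mtxR, distR, (w, h), R, T, alpha=0.5)
    mapLx, mapLy = cv2.initUndistortRectifyMap(mtxL, distL, RL, PL, (w, h), cv2.CV_32FC1)
    mapRx, mapRy = cv2.initUndistortRectifyMap(mtxR, distR, RR, PR, (w, h), cv2.CV_32FC1)
    rectL = cv2.remap(left_gray, mapLx, mapLy, cv2.INTER_LINEAR)
    rectR = cv2.remap(right_gray, mapRx, mapRy, cv2.INTER_LINEAR)

    # 4. Disparity map from the rectified greyscale pair
    sgbm = cv2.StereoSGBM_create(minDisparity=0, numDisparities=16, blockSize=5)
    disp = sgbm.compute(rectL, rectR).astype(np.float32) / 16.0
    disp_vis = cv2.normalize(disp, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    return disp_vis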

I have attached my code for your reference. I apologise if it's a bit long; I'm fairly new to programming, so I may have made some logic errors.

from flirpy.camera.lepton import Lepton
from pathlib import Path
import numpy as np
import cv2
import os
import glob
import random
import imutils


def autoCapture(left_img, right_img, state):
    # Auto-capture for mono calibration: save the pair when either camera sees the chessboard
    img_pathL = \
        'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Captured Images/Left'
    img_pathR = \
        'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Captured Images/Right'
    if not state:
        return False
    retL, cornersL = cv2.findChessboardCorners(left_img, (9, 5), None)
    retR, cornersR = cv2.findChessboardCorners(right_img, (9, 5), None)
    if retL or retR:
        captureImages(left_img, right_img, img_writer, img_pathL, img_pathR)
        return True
    return False


def autoCaptureStereo(left_img, right_img, state):
    # Auto-capture for stereo calibration: save the pair only when BOTH cameras see the chessboard
    img_pathL = \
        'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Stereo Captured Images/Left'
    img_pathR = \
        'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Stereo Captured Images/Right'
    if not state:
        return False
    retL, cornersL = cv2.findChessboardCorners(left_img, (9, 5), None)
    retR, cornersR = cv2.findChessboardCorners(right_img, (9, 5), None)
    if retL and retR:
        captureImages(left_img, right_img, stereo_img_writer, img_pathL, img_pathR)
        return True
    return False


def calibrateImages(side):
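    # Mono calibration for one camera ('L' or 'R') from the saved chessboard captures;
    # returns its camera matrix, distortion, optimal new matrix, ROI and the object/image points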
    # board shape
    width = 9
    height = 5

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,4,0)
    objp = np.zeros((width * height, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2)

    objp = objp * 0.05  # 5 cm square

    # Arrays to store object points and image points from all the images_l.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    if side == 'R':
        img_side = 'Right'
    elif side == 'L':
        img_side = 'Left'

    images = glob.glob(
        'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Stereo Captured '
        'Images/' + img_side + '/' + img_side + '*.jpg')
    count = 0

    # Start calibration from the camera
    for fname in images:
        old_name = Path(fname).stem + '.jpg'
        old_file = os.path.join(
            'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Stereo Captured Images/' + img_side + '/',
            old_name)
        print(old_file)

        if count > 20:
            os.remove(old_file)
        else:
            img = cv2.imread(fname)  # fname is path
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                print(old_name, " Detected")
                objpoints.append(objp)

                corners2 = cv2.cornerSubPix(gray, corners, (2, 2), (-1, -1), criteria)  # for drawing
                imgpoints.append(corners2)

                count += 1
            else:
                print(old_name, " Not Detected\n"
                                "Attempting correction...")
                a = 0
                while ret == False and a < 100:
                    random.seed(a)
                    alpha = random.random()
                    beta = random.randint(50, 200)
                    gray = cv2.addWeighted(gray, alpha, np.zeros(gray.shape, gray.dtype), 0, beta)
                    ret, corners = cv2.findChessboardCorners(gray, (width, height), None)
                    a += 1

                if ret == True:
                    print(old_name, " Detected after correction")
                    objpoints.append(objp)

                    corners2 = cv2.cornerSubPix(gray, corners, (2, 2), (-1, -1), criteria)
                    imgpoints.append(corners2)

                    count += 1
                else:
                    print(old_name, " Correction failed after 100 attempts")
                    # os.remove(old_file)


    # Determine the new values for different parameters (mono)
    print("\nComputing parameters...\n"
          "This may take quite some time...\n")

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    h, w = gray.shape[:2]
    Omtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist,
                                                (w, h), 1, (w, h))


    return mtx, dist, Omtx, roi, objpoints, imgpoints


def captureImages(left_img, right_img, count, img_pathL, img_pathR):
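    # Write the current left and right frames to disk, numbered by the capture counter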
    image_name_right = "Right{}.jpg".format(count)
    cv2.imwrite(os.path.join(img_pathR, image_name_right), right_img)
    image_name_left = "Left{}.jpg".format(count)
    cv2.imwrite(os.path.join(img_pathL, image_name_left), left_img)

    print("{}".format(image_name_left), "&", "{} saved!".format(image_name_right))


def colouriseImages(img):
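    # Normalise the raw thermal frame to 0-255 and apply a colour map so it can be displayed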
    normalized_img = 255 * (img - img.min()) / (img.max() - img.min())
    colour_img = cv2.applyColorMap(normalized_img.astype(np.uint8), cv2.COLORMAP_BONE)  # change colour here
    return colour_img


def depthMap(left_img, right_img): # need greyscale image
    # SGBM Parameters -----------------
    window_size = 5  # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely

    left_matcher = cv2.StereoSGBM_create(
        minDisparity=-1,
        numDisparities=1*16,  # numDisparities must be divisible by 16 (e.g. 192, 256); NOTE this affects the disparity map shape
        blockSize=window_size,
        P1=8 * 3 * window_size,
        P2=32 * 3 * window_size,
        disp12MaxDiff=12,
        uniquenessRatio=10,  # suggested values between 5 - 15
        speckleWindowSize=50,  # suggested values between 50 - 200
        speckleRange=32,  # 1 or 2 multiple of 16
        preFilterCap=63,
        mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
    )
    right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
    # FILTER Parameters
    lmbda = 80000
    sigma = 1.3

    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
    wls_filter.setLambda(lmbda)

    wls_filter.setSigmaColor(sigma)
    displ = left_matcher.compute(left_img, right_img)  # .astype(np.float32)/16
    dispr = right_matcher.compute(right_img, left_img)  # .astype(np.float32)/16
    displ = np.int16(displ)
    dispr = np.int16(dispr)
    filteredImg = wls_filter.filter(displ, left_img, None, dispr)  # important to put "imgL" here!!!

    filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX)
    filteredImg = np.uint8(filteredImg)

    return filteredImg


def enlargeImages(img, scale_res):
    enlarge_img = cv2.resize(img, scale_res, interpolation=cv2.INTER_CUBIC)
    return enlarge_img


def exitProgram():
    cv2.destroyAllWindows()
    print("CLOSING PROGRAM.")
    cv2.waitKey(300)
    print("CLOSING PROGRAM..")
    cv2.waitKey(200)
    print("CLOSING PROGRAM...")
    cv2.waitKey(100)


def rectifyImages(img, mtx, dist, Omtx, roi):
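    # Undistort one image with its mono calibration parameters and crop it to the valid ROI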
    calibrated_img = cv2.undistort(img, mtx, dist, None, Omtx)
    x, y, w, h = roi
    calibrated_img = calibrated_img[y:y + h, x:x + w]
    return calibrated_img


def rotateImages(img, angle):
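    # rotate_bound grows the canvas so the rotated frame is not clipped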
    rotated_img = imutils.rotate_bound(img, angle)
    # (h, w) = img.shape[:2]
    # (cX, cY) = (w // 2, h // 2)
    # m = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)  # rotates 90 degrees
    # rotated_img = cv2.warpAffine(img, m, (200, 200))
    # rotated_img = rotated_img[0:160, 0:160]
    return rotated_img


def stackImages(scale, img_array):
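    # Tile a list (or list of lists) of images into a single image for display in one window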
    rows = len(img_array)
    cols = len(img_array[0])
    rows_available = isinstance(img_array[0], list)
    width = img_array[0][0].shape[1]
    height = img_array[0][0].shape[0]
    if rows_available:
        for x in range(0, rows):
            for y in range(0, cols):
                if img_array[x][y].shape[:2] == img_array[0][0].shape[:2]:
                    img_array[x][y] = cv2.resize(img_array[x][y], (0, 0), None, scale, scale)
                else:
                    img_array[x][y] = cv2.resize(img_array[x][y], (img_array[0][0].shape[1], img_array[0][0].shape[0]),
                                                 None, scale, scale)
                if len(img_array[x][y].shape) == 2: img_array[x][y] = cv2.cvtColor(img_array[x][y], cv2.COLOR_GRAY2BGR)
        image_blank = np.zeros((height, width, 3), np.uint8)
        hor = [image_blank] * rows
        for x in range(0, rows):
            hor[x] = np.hstack(img_array[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if img_array[x].shape[:2] == img_array[0].shape[:2]:
                img_array[x] = cv2.resize(img_array[x], (0, 0), None, scale, scale)
            else:
                img_array[x] = cv2.resize(img_array[x], (img_array[0].shape[1], img_array[0].shape[0]), None, scale,
                                          scale)
            if len(img_array[x].shape) == 2: img_array[x] = cv2.cvtColor(img_array[x], cv2.COLOR_GRAY2BGR)
        hor = np.hstack(img_array)
        ver = hor
    return ver


def stereoCalibrateImages(mtxL, distL, mtxR, distR, objpointsL, imgpointsL, imgpointsR, left_img):
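    # Stereo calibration of the camera pair followed by stereoRectify;
    # returns the refined intrinsics plus the rectification rotations and projection matrices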
    flags = 0
    # flags |= cv2.CALIB_FIX_INTRINSIC
    # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    # flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    # flags |= cv2.CALIB_FIX_ASPECT_RATIO
    # flags |= cv2.CALIB_ZERO_TANGENT_DIST
    # flags |= cv2.CALIB_RATIONAL_MODEL
    # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    # flags |= cv2.CALIB_FIX_K3
    # flags |= cv2.CALIB_FIX_K4
    # flags |= cv2.CALIB_FIX_K5
    image_size = (left_img.shape[1], left_img.shape[0])  # derive (width, height) from the live left frame
    ret, mtxLS, distLS, mtxRS, distRS, R, T, E, F = cv2.stereoCalibrate(objpointsL, imgpointsL,
                                                                 imgpointsR, mtxL, distL, mtxR, distR, image_size,
                                                                 flags=flags)
    print("Stereo calibration rms: ", ret)
    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(mtxLS, distLS, mtxRS, distRS, image_size, R, T,
                                                               flags=cv2.CALIB_ZERO_DISPARITY, alpha=0.5)
                                                                # alpha = -1 let OpenCV optimize black parts
                                                                # alpha = 0 rotate and cut the image so that there will be no black parts, results in poor quality
                                                                # alpha = 1 make the transform but don't cut anything

    return mtxLS, distLS, mtxRS, distRS, RL, RR, PL, PR


def stereoRectifyImages(mtxLS, distLS, mtxRS, distRS, RL, RR, PL, PR, left_img, right_img):
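    # Build the undistort/rectify maps from the stereo calibration results and remap both frames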
    height, width, channel = left_img.shape
    img_size = (width, height)  # OpenCV size arguments are (width, height)
    leftMapX, leftMapY = cv2.initUndistortRectifyMap(mtxLS, distLS, RL, PL, img_size, cv2.CV_32FC1)  # cv2.CV_16SC2 is a more compact map format that makes the remap faster
    left_rectified = cv2.remap(left_img, leftMapX, leftMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)

    rightMapX, rightMapY = cv2.initUndistortRectifyMap(mtxRS, distRS, RR, PR, img_size, cv2.CV_32FC1)
    right_rectified = cv2.remap(right_img, rightMapX, rightMapY, cv2.INTER_LINEAR, cv2.BORDER_CONSTANT)

    # We need grayscale for the disparity map, so return the rectified pair converted to grayscale
    gray_left = cv2.cvtColor(left_rectified, cv2.COLOR_BGR2GRAY)
    gray_right = cv2.cvtColor(right_rectified, cv2.COLOR_BGR2GRAY)

    return gray_left, gray_right


def welcomeMessage():
    print("###################################################################\n"
          "#####            Welcome to the Lepton Stereo Project         #####\n"
          "###################################################################\n"
          "Press SPACE to manually capture images(calibration)################\n"
          "Press a to automatically capture images(calibration)###############\n"
          "Press s to automatically capture stereo images (calibration)#######\n"
          "Press c to calibrate###############################################\n"
          "Press ESC to end program###########################################\n"
          "###################################################################\n")


# Initialise
cam1 = Lepton()
cam2 = Lepton()

img_writer = 0
stereo_img_writer = 0
calibrationCheck = False
auto_cap_state = False
auto_cap_stereo_state = False

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
image_size = None


# Main Program

welcomeMessage()

while True:
    # Get images where 1 is right and 2 is left
    img_r = cam1.grab(0).astype(np.float32)  # .grab() takes serial port no. aka camera ID
    img_l = cam2.grab(3).astype(np.float32)

    # Colourise
    img_r = colouriseImages(img_r)
    img_l = colouriseImages(img_l)

    # Rotate & remove black bars
    img_r = rotateImages(img_r, -90)
    img_l = rotateImages(img_l, 90)

    img_captured = autoCapture(img_l, img_r, auto_cap_state)
    if img_captured == True and img_writer < 20:
        img_writer += 1
    else:
        auto_cap_state = False
    img_stereo_captured = autoCaptureStereo(img_l, img_r, auto_cap_stereo_state)
    if img_stereo_captured == True and stereo_img_writer < 20:
        stereo_img_writer += 1
    else:
        auto_cap_stereo_state = False

    # Display images
    if calibrationCheck == False:
        disparity_image = depthMap(cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY), cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY))
        # disparity_image = disparity_image[0:160, 79:119]
        # Enlarge, but requires more resources
        # img_r = enlargeImages(img_r, (480, 640))
        # img_l = enlargeImages(img_l, (480, 640))
        # disparity_image = enlargeImages(disparity_image, (480, 640))

        img_stack = stackImages(1, ([img_l, img_r, disparity_image]))  # LEFT and RIGHT
        cv2.imshow('Original Stereo', img_stack)

    else:
        # img_r = rectifyImages(img_r, mtxR, distR, OmtxR, roiR)
        # img_l = rectifyImages(img_l, mtxL, distL, OmtxL, roiL)
        gray_left, gray_right = stereoRectifyImages(mtxLS, distLS, mtxRS, distRS, RL, RR, PL, PR, img_l, img_r)
        disparity_image = depthMap(gray_left, gray_right)
        # disparity_image = disparity_image[0:160, 79:119]

        # Enlarge, but requires more resources
        # img_r = enlargeImages(img_r, (480, 480))
        # img_l = enlargeImages(img_l, (480, 480))
        # disparity_image = enlargeImages(disparity_image, (480, 480))

        img_stack = stackImages(1, ([img_l, img_r, disparity_image]))
        cv2.imshow('Calibrated stereo', img_stack)

    k = cv2.waitKey(1)
    if k % 256 == 27:  # ESC pressed
        exitProgram()
        break

    elif k % 256 == 32:  # SPACE pressed
        # Manually capture images (for calibration)
        img_path1 = \
            'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/Captured Images'
        img_path2 = \
            'C:/Users/clare/PycharmProjects/ThermalCameraProject/Lepton Projects/Lepton-Stereo Project/temp'
        captureImages(img_l, img_r, img_writer, img_path1, img_path2)
        img_writer += 1

    elif k % 256 == 97:  # a is pressed
        # Detect corners for calibration & auto capture images
        auto_cap_state = True
        print("Auto Capture for mono calibration is ON\n"
              "Press b to stop\n")

    elif k % 256 == 115:  # s is pressed
        auto_cap_stereo_state = True
        print("Auto Capture for stereo calibration is ON\n"
              "Press b to stop\n")

    elif k % 256 == 98:  # b is pressed
        # Exit auto capture
        auto_cap_state = False
        auto_cap_stereo_state = False
        print("Auto Capture for mono/stereo calibration is OFF")

    elif k % 256 == 99:  # c is pressed
        # Get calibration Parameters
        cv2.destroyAllWindows()
        mtxR, distR, OmtxR, roiR, objpointsR, imgpointsR = calibrateImages('R')
        mtxL, distL, OmtxL, roiL, objpointsL, imgpointsL = calibrateImages('L')
        mtxLS, distLS, mtxRS, distRS, RL, RR, PL, PR = stereoCalibrateImages(mtxL, distL, mtxR, distR, objpointsL, imgpointsL, imgpointsR, img_l)
        calibrationCheck = True

# Close
cam1.close()
cam2.close()

(screenshot attached)

The images show how it looks. This is before the calibration and rectification.