OK — if you visit this link you can download example left and right images, plus an NPZ file with the required parameters for the code below. Put them in a folder called `data`,
and you should be able to use this code to generate a disparity map and point cloud similar to the ones I've posted above.
import cv2
from matplotlib import pyplot as plt
import numpy as np
# Define path and filename for output file
PATH = './data/'  # folder holding the example images and the calibration NPZ
OUTPUT_FILE = 'point_cloud.ply'  # name of the generated point-cloud file (ASCII PLY)
# Function to create point cloud file
# From https://github.com/OmarPadierna/3DReconstruction
def create_output(vertices, colors, filename):
    """Write a coloured point cloud to an ASCII PLY file.

    Adapted from https://github.com/OmarPadierna/3DReconstruction

    Parameters
    ----------
    vertices : array-like
        Reshaped to (N, 3) float XYZ coordinates.
    colors : array-like
        Reshaped to (N, 3) RGB values (written as uchar properties).
    filename : str
        Output path for the .ply file.

    Raises
    ------
    ValueError
        If the number of vertices and colours differ (previously this
        silently produced a corrupt PLY file).
    """
    colors = np.asarray(colors).reshape(-1, 3)
    vertices = np.asarray(vertices).reshape(-1, 3)
    if len(vertices) != len(colors):
        raise ValueError('vertices and colors must contain the same number of points')
    data = np.hstack([vertices, colors])
    # Header string is emitted verbatim; vertex count is interpolated below.
    ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
    with open(filename, 'w') as f:
        f.write(ply_header % dict(vert_num=len(data)))
        # One "x y z r g b" line per point.
        np.savetxt(f, data, '%f %f %f %d %d %d')
# Load the raw fisheye captures for each camera, and prepare the dicts
# that will hold their rectified and trimmed versions.
image = dict(
    left=cv2.imread(PATH + 'left_fisheye.jpg'),
    right=cv2.imread(PATH + 'right_fisheye.jpg'),
)
image_rectified = dict()
image_trimmed = dict()

# Load the calibration parameters produced by the calibration step.
pars = np.load(PATH + 'rectification_pars.npz')
K = dict(left=pars['K1'], right=pars['K2'])   # camera intrinsic matrices
D = dict(left=pars['D1'], right=pars['D2'])   # fisheye distortion coefficients
rvecs, tvecs = pars['rvecs'], pars['tvecs']   # inter-camera rotation / translation
im_size, output_size = pars['im_size'], pars['output_size']
# Containers for the per-camera rectification outputs.
R, P, map1, map2 = {}, {}, {}, {}

# Fisheye stereo rectification: computes each camera's rectifying rotation
# (R1/R2), projection matrix (P1/P2) and the disparity-to-depth matrix Q.
rectification = cv2.fisheye.stereoRectify(
    K1=K['left'], D1=D['left'],
    K2=K['right'], D2=D['right'],
    imageSize=im_size,
    newImageSize=output_size,
    R=rvecs, tvec=tvecs,
    flags=cv2.fisheye.CALIB_ZERO_DISPARITY,
    balance=0.3,
)
R['left'], R['right'], P['left'], P['right'], Q = rectification
# Manual image cropping parameters: trim the black border regions left by
# the pincushion rectification, and correct a per-camera vertical offset
# so the two crops stay row-aligned.
crop = dict(
    w=600,    # trimmed width
    h=467,    # trimmed height
    x=33,     # left edge of the crop window
    y=132,    # top edge of the crop window
    v=dict(left=0, right=33),  # extra vertical shift per camera
)
# Undistort, rectify and trim each camera's image.
for side in ('left', 'right'):
    # Undistortion / rectification lookup tables for this camera.
    map1[side], map2[side] = cv2.fisheye.initUndistortRectifyMap(
        K=K[side],
        D=D[side],
        R=R[side],
        P=P[side],
        size=output_size,
        m1type=cv2.CV_16SC2,
    )
    # Warp the raw fisheye capture into the rectified frame.
    image_rectified[side] = cv2.remap(
        src=image[side],
        map1=map1[side],
        map2=map2[side],
        interpolation=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
    )
    # Cut away the black border, applying the per-camera vertical offset.
    top = crop['y'] + crop['v'][side]
    left = crop['x']
    image_trimmed[side] = image_rectified[side][top:top + crop['h'], left:left + crop['w']]
    # Save intermediate results for inspection.
    cv2.imwrite(PATH + '{}_rectified.jpg'.format(side), image_rectified[side])
    cv2.imwrite(PATH + '{}_trimmed.jpg'.format(side), image_trimmed[side])
# Semi-global block matcher, hand-tuned for these inputs.
# P1/P2 follow the conventional 8*channels*blockSize^2 and
# 32*channels*blockSize^2 smoothness penalties.
block_size = 4
channels = 3
stereo = cv2.StereoSGBM_create(
    minDisparity=-1,
    numDisparities=32,
    blockSize=block_size,
    uniquenessRatio=2,
    speckleWindowSize=50,
    speckleRange=1,
    disp12MaxDiff=2,
    P1=8 * channels * block_size ** 2,
    P2=32 * channels * block_size ** 2,
    mode=cv2.STEREO_SGBM_MODE_HH,
)
# Compute disparity map and point cloud.
# BUG FIX: StereoSGBM.compute returns disparities as 16-bit fixed-point
# values with 4 fractional bits, i.e. the true disparity multiplied by 16.
# Feeding the raw values into reprojectImageTo3D scales depth by ~1/16,
# which is why the point cloud looked completely off — convert to real
# float disparities first.
disparity_map = stereo.compute(
    image_trimmed['left'], image_trimmed['right']
).astype(np.float32) / 16.0
# NOTE(review): Q was computed for the full rectified image, but the
# disparity comes from images cropped at (crop['x'], crop['y'] + v).
# The crop shifts the principal point, so for a metrically exact cloud
# Q's cx/cy terms should be offset by the crop origin — verify against
# the calibration.
points_3D = cv2.reprojectImageTo3D(disparity_map, Q)
# Show disparity map
plt.imshow(disparity_map, 'gray')
plt.show()
# Replace +/-inf (produced where disparity is zero) so they can be masked
points_3D[points_3D == float('+inf')] = 0
points_3D[points_3D == float('-inf')] = 0
# Keep only pixels where the matcher found a disparity above the minimum
# (i.e. discard no-depth pixels)
mask_map = disparity_map > disparity_map.min()
# Mask colors (BGR -> RGB, since PLY stores RGB) and points
colors = cv2.cvtColor(image_trimmed['left'], cv2.COLOR_BGR2RGB)
output_points = points_3D[mask_map]
output_colors = colors[mask_map]
# Generate point cloud
output_file = PATH + OUTPUT_FILE
create_output(output_points, output_colors, output_file)
print('All done!')
I’m aware that the input images are far from ideal and I’m working separately to improve them, but as far as I can tell the disparity map is reasonable given the inputs, while the point cloud seems to be totally off base.