Update: I had a bug where my robot poses were in millimeters while the calibration board square size was specified in meters. After fixing that, here is my result:
Rotation matrix:
[[ 0.99935638 0.02634614 -0.02434551]
[-0.00632889 0.79751818 0.60326172]
[ 0.03530961 -0.60271937 0.79717162]]
Translation vector:
[[ 0.31023636]
[-0.01218995]
[ 0. ]]
Still very weird that T_z is exactly 0.
Here are the robot poses (x, y, z in mm, yaw in degrees); see the observability check right after the list:
220, 0, 0, 0
240, 10, 55, 10
280, 60, 115, 20
250, -30, 65, -30
290, -15, 105, 70
245, 20, -35, 5
235, 10, 45, 30
235, -15, 20, -70
270, -40, 95, -90
285, -60, 120, -95
245, -50, 0, -95
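One possible explanation I'm considering: every pose above differs only in yaw, so every relative gripper motion is a rotation about the base z-axis. If I understand the AX = XB formulation correctly, the translation component along a common rotation axis is not constrained, so with all rotation axes parallel the solver cannot recover T_z and may simply return 0 for it. Here is a minimal check (my own sketch, reusing the pose convention above) that the relative rotation axes are indeed all (0, 0, ±1):

import cv2
import numpy as np

poses = [
    (220, 0, 0, 0), (240, 10, 55, 10), (280, 60, 115, 20),
    (250, -30, 65, -30), (290, -15, 105, 70), (245, 20, -35, 5),
    (235, 10, 45, 30), (235, -15, 20, -70), (270, -40, 95, -90),
    (285, -60, 120, -95), (245, -50, 0, -95),
]

def yaw_to_rotation_matrix(yaw_degrees):
    c, s = np.cos(np.radians(yaw_degrees)), np.sin(np.radians(yaw_degrees))
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

rotations = [yaw_to_rotation_matrix(p[3]) for p in poses]
for R_i, R_j in zip(rotations, rotations[1:]):
    rel = R_j @ R_i.T                     # relative rotation between consecutive poses
    rvec = cv2.Rodrigues(rel)[0].ravel()  # rotation vector: axis * angle
    angle = np.linalg.norm(rvec)
    if angle > 1e-9:
        print(rvec / angle)               # prints [0. 0. +/-1.] for every pair

If that is the issue, adding a few poses where the tool is actually tilted (some pitch/roll, not only yaw) should make T_z observable.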
Here is the code:
import cv2
import numpy as np
import glob
# Function to convert yaw angle (in degrees) to a rotation matrix
def yaw_to_rotation_matrix(yaw_degrees):
    # Convert yaw angle to radians
    yaw_radians = np.radians(yaw_degrees)
    # Calculate rotation matrix
    cos_yaw = np.cos(yaw_radians)
    sin_yaw = np.sin(yaw_radians)
    rotation_matrix = np.array([[cos_yaw, -sin_yaw, 0],
                                [sin_yaw,  cos_yaw, 0],
                                [0,        0,       1]])
    return rotation_matrix
# Lists to hold translation vectors and rotation matrices
translation_vectors = []
rotation_matrices = []
# Open and read the file
with open('calibration/calibration_poses.txt', 'r') as file:
    for line in file:
        # Split the line into components, convert to floats
        x, y, z, yaw = [float(value.strip()) for value in line.split(',')]
        # Create translation vector (convert mm to meters)
        translation_vector = np.array([x/1000, y/1000, z/1000])
        # Convert yaw to rotation matrix
        rotation_matrix = yaw_to_rotation_matrix(yaw)
        # Append to the lists
        translation_vectors.append(translation_vector)
        rotation_matrices.append(rotation_matrix)
# Print the results
for translation_vector, rotation_matrix in zip(translation_vectors, rotation_matrices):
    print(f'Translation Vector:\n{translation_vector}')
    print(f'Rotation Matrix:\n{rotation_matrix}\n')
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Parameters
pattern_size = (6, 9)  # Number of inner corners per chessboard row and column
square_size = 0.0835/7  # Square size in meters (0.0835 m spanning 7 squares)
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), ..., (5,8,0)
objp = np.zeros((pattern_size[0]*pattern_size[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1,2)
objp *= square_size
# Arrays to store object points and image points
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
# Load your images and robot poses here
images = sorted(glob.glob('calibration_image_*.png'))
print(images)
R_gripper2base = rotation_matrices  # End-effector (gripper) poses in the robot base frame: rotations
t_gripper2base = translation_vectors  # End-effector (gripper) poses in the robot base frame: translations
ii = 0
print('total number of images:', len(images))
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)
    # If found, add object points and refined image points
    if ret:
        ii += 1
        print(fname)
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, pattern_size, corners2, ret)
        cv2.imshow('img', img)
        cv2.imwrite('{}.png'.format(ii), img)
        print('{}.png'.format(ii))
        cv2.waitKey(50)
cv2.destroyAllWindows()
print('number of used images:', ii)
# Calibrate camera using the object and image points
rms_error, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print(rms_error, mtx)
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error: {}".format(mean_error/len(objpoints)))
# Convert rotation vectors to rotation matrices
R_target2cam = [cv2.Rodrigues(rvec)[0] for rvec in rvecs]
t_target2cam = tvecs
print(tvecs)
# Perform eye to hand calibration
# https://forum.opencv.org/t/eye-to-hand-calibration/5690/10
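# For the eye-to-hand case, calibrateHandEye has to be given the inverted robot
# poses (base -> gripper), according to the thread above, so invert each
# gripper -> base pose first: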
R_base2gripper, t_base2gripper = [], []
for R, t in zip(R_gripper2base, t_gripper2base):
    R_b2g = R.T
    t_b2g = -R_b2g @ t
    R_base2gripper.append(R_b2g)
    t_base2gripper.append(t_b2g)
# TODO: figure out whether calibrateHandEye should be given gripper2base or
# base2gripper (and likewise for t). According to the link above it should be
# base2gripper for eye-to-hand, but which of my transforms is actually base2gripper?
# Units should be consistent now: the robot poses are converted from mm to meters
# above, and square_size is in meters, so the camera-side translations are in meters too.
R, t = cv2.calibrateHandEye(
    R_gripper2base=R_base2gripper,
    t_gripper2base=t_base2gripper,
    R_target2cam=R_target2cam,
    t_target2cam=t_target2cam,
)
print("Rotation matrix:\n", R)
print("Translation vector:\n", t)
The calibration errors are low; here are the printed values:
rms_error: 0.37555403620640343
total error: 0.04879402008734934