Eye-in-hand calibration inconsistent and not accurate enough

Hi, my eye-in-hand setup keeps giving me inconsistent results that aren't accurate enough. I've done the calibration exactly as required, and I'm using the hand-eye calibration math provided by OpenCV, but I don't seem to understand where it's going wrong. My code that uses the calibration file is below; can I get some help? My arm is a UFACTORY xArm 6 Lite.
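For context, the calibration file comes from OpenCV's hand-eye routine; a minimal sketch of that step with cv2.calibrateHandEye might look like this (the calibrate_eye_in_hand helper and its argument names are placeholders, not my actual calibration script):

```python
import numpy as np
import cv2

def calibrate_eye_in_hand(R_gripper2base, t_gripper2base,
                          R_target2cam, t_target2cam,
                          camera_matrix, dist_coeffs,
                          out_file='eye_in_hand_calibration.npz'):
    """Solve the hand-eye (AX = XB) problem for the camera-in-gripper transform and save it.

    R_gripper2base / t_gripper2base: one 3x3 / 3x1 pair per capture, from the robot.
    R_target2cam / t_target2cam: one 3x3 / 3x1 pair per capture, from solvePnP on the board.
    """
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base, t_gripper2base,
        R_target2cam, t_target2cam,
        method=cv2.CALIB_HAND_EYE_TSAI
    )
    X_gripper2cam = np.eye(4)              # 4x4 pose of the camera in the gripper frame
    X_gripper2cam[:3, :3] = R_cam2gripper
    X_gripper2cam[:3, 3] = t_cam2gripper.flatten()
    np.savez(out_file,
             X_gripper2cam=X_gripper2cam,
             camera_matrix=camera_matrix,
             dist_coeffs=dist_coeffs)
    return X_gripper2cam
```

The main script that loads the saved file and uses it at runtime follows.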
```python
import numpy as np
import cv2
from scipy.spatial.transform import Rotation

# --- 1. CONSTANTS AND SETUP ---
# These must perfectly match your physical setup and calibration files.

# File paths
CALIBRATION_FILE = 'eye_in_hand_calibration.npz'

# Physical layout of the small markers on the PCB
ARUCO_DICT_ID = cv2.aruco.DICT_ARUCO_ORIGINAL
MARKER_SIZE_M = 0.008  # 8 mm
MARKER_LAYOUT_MM = {
    0: [-70, -30], 1: [70, -30], 2: [70, 30], 3: [-70, 30]
}

def create_homogeneous_transform(R, t):
    """Creates a 4x4 transformation matrix from a rotation matrix and translation vector."""
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t.flatten()
    return T

# --- 2. LOAD CALIBRATION DATA ---
# This loads the results from your hand-eye calibration.

try:
    with np.load(CALIBRATION_FILE) as data:
        T_gripper_cam = data['X_gripper2cam']
        camera_matrix = data['camera_matrix']
        dist_coeffs = data['dist_coeffs']
    print("✅ Calibration data loaded successfully.")
    print("-" * 30)
except FileNotFoundError:
    print(f"❌ ERROR: Calibration file '{CALIBRATION_FILE}' not found.")
    exit()
except KeyError as e:
    print(f"❌ ERROR: Calibration file is missing required key: {e}")
    exit()

# --- 3. SIMULATED LIVE DATA ---
# In your real app, this data comes from the camera and robot in real time.
# We are using sample data here for a predictable test.

# Sample robot pose (gripper's pose relative to its base)
# [x, y, z, roll, pitch, yaw] in mm and degrees
robot_pose_mm_deg = [300, 50, 250, 180, 0, 0]

# Sample detected corners of the 4 markers as they would appear on the camera image
# In a real scenario, these values would be detected by cv2.aruco.detectMarkers
simulated_corners = [
    np.array([[[550, 250], [650, 250], [650, 350], [550, 350]]], dtype=np.float32),  # Marker 0
    np.array([[[850, 250], [950, 250], [950, 350], [850, 350]]], dtype=np.float32),  # Marker 1
    np.array([[[850, 550], [950, 550], [950, 650], [850, 650]]], dtype=np.float32),  # Marker 2
    np.array([[[550, 550], [650, 550], [650, 650], [550, 650]]], dtype=np.float32)   # Marker 3
]
simulated_ids = np.array([[0], [1], [2], [3]])

# --- 4. CORE CALCULATION FUNCTIONS ---

def estimate_pcb_pose_from_corners(corners, ids):
    """
    Calculates the 6D pose of the PCB relative to the camera.
    This is the first major calculation step.
    """
    obj_points = []
    img_points = []

    # Get the 3D coordinates of marker corners in the object's own frame
    half_size_m = MARKER_SIZE_M / 2.0
    marker_corners_3d_local = np.array([
        [-half_size_m, -half_size_m, 0], [ half_size_m, -half_size_m, 0],
        [ half_size_m,  half_size_m, 0], [-half_size_m,  half_size_m, 0]
    ], dtype=np.float32)

    for i, marker_id in enumerate(ids.flatten()):
        if marker_id in MARKER_LAYOUT_MM:
            marker_center_m = np.array(MARKER_LAYOUT_MM[marker_id]) / 1000.0  # Convert to meters
            # Add the marker's offset to its local corner coordinates
            for corner_3d in marker_corners_3d_local:
                obj_points.append(corner_3d + np.append(marker_center_m, 0))
            # Add the detected 2D image points
            for corner_2d in corners[i][0]:
                img_points.append(corner_2d)

    if not obj_points:
        return None, None

    # Use solvePnP to find the rotation (rvec) and translation (tvec)
    success, rvec, tvec = cv2.solvePnP(
        np.array(obj_points), np.array(img_points),
        camera_matrix, dist_coeffs
    )

    if not success:
        return None, None

    return rvec, tvec

def get_final_pcb_pose_in_base(robot_pose, rvec_cam_pcb, tvec_cam_pcb):
    """
    Performs the final transformation chain to find the PCB's pose in the robot's world.
    This is the second major calculation step.
    """
    # a) Create the transform for PCB -> Camera
    R_cam_pcb, _ = cv2.Rodrigues(rvec_cam_pcb)
    T_cam_pcb = create_homogeneous_transform(R_cam_pcb, tvec_cam_pcb)

    # b) Create the transform for Gripper -> Base from the robot's reported pose
    position_m = np.array(robot_pose[:3]) / 1000.0  # Convert to meters
    orientation_deg = robot_pose[3:]
    R_base_gripper = Rotation.from_euler('xyz', orientation_deg, degrees=True).as_matrix()
    T_base_gripper = create_homogeneous_transform(R_base_gripper, position_m)

    # c) The complete transformation: Base <- Gripper <- Camera <- PCB
    # This uses the hand-eye calibration matrix T_gripper_cam
    T_base_pcb = T_base_gripper @ T_gripper_cam @ T_cam_pcb

    return T_base_pcb, T_cam_pcb, T_base_gripper

# --- 5. EXECUTE AND DEBUG ---

if __name__ == "__main__":
    # Step 1: Calculate the PCB's pose relative to the camera
    rvec, tvec = estimate_pcb_pose_from_corners(simulated_corners, simulated_ids)
    if rvec is None:
        print("❌ Could not estimate PCB pose from simulated corners.")
    else:
        print("✅ PCB pose estimated relative to camera.")
        print("-" * 30)

        # Step 2: Perform the full transformation
        final_transform, T_cam_pcb, T_base_gripper = get_final_pcb_pose_in_base(robot_pose_mm_deg, rvec, tvec)

        # Step 3: Print all matrices for debugging
        print("--- DEBUGGING MATRICES ---")
        print("\n[ T_cam_pcb ] (Pose of PCB in Camera's frame):")
        print(np.round(T_cam_pcb, 4))

        print("\n[ T_gripper_cam ] (Pose of Camera in Gripper's frame - from calibration):")
        print(np.round(T_gripper_cam, 4))

        print("\n[ T_base_gripper ] (Pose of Gripper in Robot's Base frame - from robot):")
        print(np.round(T_base_gripper, 4))

        print("\n[ FINAL T_base_pcb ] (Calculated pose of PCB in Robot's Base frame):")
        print(np.round(final_transform, 4))

        # Extract and print the final XYZ coordinates
        final_xyz_mm = final_transform[:3, 3] * 1000.0
        print("\n--- FINAL XYZ POSITION (mm) ---")
    print(f"X: {final_xyz_mm[0]:.2f}, Y: {final_xyz_mm[1]:.2f}, Z: {final_xyz_mm[2]:.2f}"). my camera is the realsense d435 and so the z axis falls in line with the x axis of the tool. it is placed 50 mm x and 20 mm y of the tcp.