Eye-in-hand calibration with IRB140: axes are wrong

Hello. I'm trying to do an eye-in-hand calibration with an IRB140 robot arm.

It worked in simulation, but with the real robot it's slightly off.

I get the base-to-gripper pose from RobotStudio's RAPID code, like this:
CONST robtarget Target_10:=[[465,0,400],[0.5,0.5,-0.5,-0.5],[0,0,0,0],[9E+09,9E+09,9E+09,9E+09,9E+09,9E+09]];
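One thing worth double-checking right here: RAPID stores the robtarget orientation as [q1, q2, q3, q4] = (w, x, y, z), while SciPy's R.from_quat expects (x, y, z, w) by default. A minimal sketch of the reordering, using the quaternion from the robtarget above:

import numpy as np
from scipy.spatial.transform import Rotation as R

# Orientation from the robtarget above, in RAPID order (w, x, y, z)
q_rapid = [0.5, 0.5, -0.5, -0.5]

# Reorder to SciPy's (x, y, z, w) before building the rotation matrix
q_scipy = [q_rapid[1], q_rapid[2], q_rapid[3], q_rapid[0]]
R_base2gripper = R.from_quat(q_scipy).as_matrix()
print(np.round(R_base2gripper, 3))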

But we have a tool on the robot that changes the orientation, so I tried to rotate everything back with this code:

import numpy as np
from scipy.spatial.transform import Rotation as R

# Undo the fixed tool orientation (note: R.from_quat expects (x, y, z, w) order)
tcprot = np.dot(R.from_euler('xyz', [0, 90, 0], degrees=True).as_matrix(),
                np.linalg.inv(R.from_quat([0.5, 0.5, -0.5, -0.5]).as_matrix()))
poses = []
rots = []
for target in irb140targets:
    poses.append(target[0])
    rots.append(np.dot(tcprot, R.from_quat(target[1]).as_matrix()).flatten().tolist())
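If the tool also has a translational offset (not just a rotation), correcting the orientation alone won't remove it; the offset has to be taken out with a full rigid transform. A minimal sketch, where T_flange2tool is a placeholder you would fill in from your tooldata:

import numpy as np

def to_hom(R_mat, t):
    # Stack a 3x3 rotation and a translation into a 4x4 homogeneous transform
    T = np.eye(4)
    T[:3, :3] = R_mat
    T[:3, 3] = np.asarray(t).ravel()
    return T

def inv_hom(T):
    # Invert a rigid transform: R -> R.T, t -> -R.T @ t
    Ti = np.eye(4)
    Ti[:3, :3] = T[:3, :3].T
    Ti[:3, 3] = -T[:3, :3].T @ T[:3, 3]
    return Ti

# Placeholders: fill these from your tooldata and robtarget
T_flange2tool = to_hom(np.eye(3), [0, 0, 100])   # hypothetical 100 mm tool offset
T_base2tool = to_hom(np.eye(3), [465, 0, 400])   # pose reported by the controller

# The reported pose is base->tool; strip the tool to get base->flange
T_base2flange = T_base2tool @ inv_hom(T_flange2tool)
print(T_base2flange[:3, 3])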

Then I calibrate the camera:

def calibrate(middle):
    # Prepare object points
    objp = objpgen(middle)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    # Find the chessboard in every PNG, skipping files that start with ~
    images = [img for img in glob.glob('*.png') if not img.startswith('~')]
    # Process the images
    for fname in images:
        img = cv.imread(fname)
        imgbw = prepareimage(img)
        # Find the chess board corners
        ret, corners = cv.findChessboardCornersSB(imgbw, (CHECKERBOARD_HEIGHT, CHECKERBOARD_WIDTH), None)
        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
        else:
            print(fname)
            imgbw = cv.resize(imgbw, (1024, 768))
            img = cv.resize(img, (1024, 768))
            cv.imshow(fname, imgbw)
            cv.imshow('original', img)
            cv.waitKey()

    # Obtain camera parameters
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, imgbw.shape[::-1], None, None)

    return ret, mtx, dist, rvecs, tvecs
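To quantify whether the intrinsics are actually good, I can check the per-view reprojection error. A minimal sketch, assuming access to the objpoints/imgpoints lists built inside calibrate (values much above ~0.5 px would be suspicious):

import numpy as np
import cv2 as cv

def reprojection_errors(objpoints, imgpoints, mtx, dist, rvecs, tvecs):
    # RMS reprojection error per calibration image, in pixels
    errors = []
    for objp, imgp, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
        proj, _ = cv.projectPoints(objp, rvec, tvec, mtx, dist)
        errors.append(cv.norm(imgp, proj, cv.NORM_L2) / np.sqrt(len(proj)))
    return errors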

It seems good. Then I try to get the cam2gripper transform with calibrateHandEye:

    images = [img for img in glob.glob('*.png') if not img.startswith('~')]
    for idx, fname in enumerate(images):
        print(fname)
        img = cv.imread(fname)
        imgbw = prepareimage(img)
        ret, corners = cv.findChessboardCornersSB(imgbw, (CHECKERBOARD_HEIGHT, CHECKERBOARD_WIDTH), None)
        if ret:

            corners2 = cv.cornerSubPix(imgbw, corners, (11, 11), (-1, -1), criteria)
            # Find the rotation and translation vectors.
            ret, rvecs, tvecs = cv.solvePnP(objp, corners2, mtx, dist)

            # With this I get a plausible translation but a weird ~70/~70/~70 rotation
            R_gripper2baseArray.append(np.array(camerar[idx]).reshape(3, 3))
            t_g2b = camerat[idx]

            '''I also tried the transpose, because calibrateHandEye's parameter is
            named gripper2base; that gives a nice 90 deg rotation, but then the
            translations are way off:

            R_gripper2baseArray.append(np.array(camerar[idx]).reshape(3, 3).T)
            t_g2b = -R_gripper2baseArray[idx] @ camerat[idx]
            '''

            T_gripper2baseArray.append(np.array([[t_g2b[0]], [t_g2b[1]], [t_g2b[2]]]))

            # Convert the board pose (target2cam) rvec to a rotation matrix
            rmatrix, _ = cv.Rodrigues(rvecs)
            rvecsarray.append(rmatrix)
            tvecsarray.append(tvecs)

    # Rotation and translation of the camera relative to the gripper
    R_cam2gripper, t_cam2gripper = cv.calibrateHandEye(R_gripper2baseArray, T_gripper2baseArray,
                                                       rvecsarray, tvecsarray)
    return mtx, dist, R_cam2gripper, t_cam2gripper
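One sanity check that would localize the error, a minimal sketch assuming the arrays above: chaining gripper2base, cam2gripper and target2cam gives the board pose in the base frame, and since the board never moves it should come out (nearly) identical for every image:

import numpy as np

def to_hom(R_mat, t):
    T = np.eye(4)
    T[:3, :3] = R_mat
    T[:3, 3] = np.asarray(t).ravel()
    return T

T_cam2gripper = to_hom(R_cam2gripper, t_cam2gripper)
for R_g2b, t_g2b, R_t2c, t_t2c in zip(R_gripper2baseArray, T_gripper2baseArray,
                                      rvecsarray, tvecsarray):
    # Board pose in the base frame; should barely scatter across images
    T_base2target = to_hom(R_g2b, t_g2b) @ T_cam2gripper @ to_hom(R_t2c, t_t2c)
    print(np.round(T_base2target[:3, 3], 1))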

The translation seems okay, but the rotation is weird: I get roughly ~70/~70/~70.

cam to gripper:
translation:
[[-108.00804741]
 [ -17.58783339]
 [ -16.13878481]]
rotation (Euler, deg):
[-70.99567046  73.49937437 -77.51142041]

When I tried to convert it to gripper2base, I got these values:

cam to gripper:
translation:
[[540.88451144]
 [ 62.47913557]
 [119.86727294]]
rotation (Euler, deg):
[-1.03713572  1.80041452  91.342652  ]

Then, with all these values and one more PnP, I try to calculate base2object with this code:

def worldposecalc(fname, middle, mtx, dist, R_base2gripper, t_base2gripper, R_cam2gripper, t_cam2gripper):
    objp = objpgen(middle)

    # The robot pose from RobotStudio (same as on the real pendant)
    R_base2gripper = np.array(R_base2gripper).reshape(3, 3)
    t_base2gripper = np.array(t_base2gripper).reshape(3, 1)

    img = cv.imread(fname)
    imgbw = prepareimage(img)
    ret, corners = cv.findChessboardCornersSB(imgbw, (CHECKERBOARD_HEIGHT, CHECKERBOARD_WIDTH), None)

    if not ret:
        print("No checkerboard.")
        return

    corners2 = cv.cornerSubPix(imgbw, corners, (11, 11), (-1, -1), criteria)
    # Find the rotation and translation vectors for object to the camera
    ret, R_object2cam, t_object2cam = cv.solvePnP(objp, corners2, mtx, dist)

    img = cv.drawFrameAxes(img, mtx, dist, R_object2cam, t_object2cam, SQUARE_SIZE, 3)
    cv.imshow('Pose', img)
    cv.waitKey()

    R_object2cam, _ = cv.Rodrigues(R_object2cam)

    # combine the rot and trans to 4x4 matrix
    base2gripper = np.vstack((np.hstack((R_base2gripper, (t_base2gripper).flatten()[:, None])), [0, 0, 0, 1]))

    cam2gripper = np.vstack((np.hstack((R_cam2gripper, (t_cam2gripper).flatten()[:, None])), [0, 0, 0, 1]))

    object2cam = np.vstack((np.hstack((R_object2cam, (t_object2cam).flatten()[:, None])), [0, 0, 0, 1]))

    # Chain base2gripper, cam2gripper and object2cam into base2object
    base2object = base2gripper @ cam2gripper @ object2cam
    return base2object
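For reference, a minimal usage sketch that decomposes the returned matrix (the file name and the robot pose arguments are placeholders):

from scipy.spatial.transform import Rotation as R

# Placeholder arguments: one image and the matching flattened robot pose
T = worldposecalc('middle.png', True, mtx, dist, camerar[0], camerat[0],
                  R_cam2gripper, t_cam2gripper)
print('t_base2object:', T[:3, 3])
print('Euler xyz (deg):', R.from_matrix(T[:3, :3]).as_euler('xyz', degrees=True))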

It's close, but the X and Y values are wrong. I run the code with two objp arrays: the first one is shifted to the middle, the second is at the origin (top-right corner), so Y should change by about -70 and X by about 44:

Middle: [641.6340941627703, -107.16331555942497, 127.43759350824172]
Top right: [585.5664540963195, -169.64934110933615, 127.8454609099532]

The object-to-cam values from the last PnP do change by the correct amounts:

Middle, object to cam:
[[-28.54604136]
 [-42.16923579]
 [569.79698321]]

Top right, object to cam:
[[ 42.5152117 ]
 [-86.86914833]
 [570.47972835]]
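Since the object2cam deltas are right while the base2object deltas are not, the error must enter through a rotation somewhere in the chain. A minimal diagnostic sketch using the two base2object results above: compare the length of the measured shift with the expected one; matching lengths with a different direction point at a pure rotation error:

import numpy as np

p_middle = np.array([641.6340941627703, -107.16331555942497, 127.43759350824172])
p_corner = np.array([585.5664540963195, -169.64934110933615, 127.8454609099532])

expected = np.array([44.0, -70.0, 0.0])   # shift between the two objp origins
measured = p_corner - p_middle

print('expected |d| =', np.linalg.norm(expected))  # ~82.7
print('measured |d| =', np.linalg.norm(measured))  # ~84.0
# Similar lengths but different directions -> the translation chain is fine,
# so the residual error sits in a rotation (tool, hand-eye or board orientation).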

I don't know what the problem could be. It looks like the rotation is wrong, but I have tried so many things with it.
If you could take the time to look at it, I would really appreciate it. This is my thesis and I'm really stuck here. I have uploaded everything to GitHub. (This is the first time I am seriously working with robots.) If you need any information or have any questions, I will answer them to the best of my ability.

Github: GitHub - dzsiki/CalibrateHandEye