calibrateHandEye returns huge or wrong numbers for the real robot, but worked with the simulated one

I have code that calibrates the camera, and then I tried to find the camera-to-gripper translation, but the calibrateHandEye function gives back random or huge numbers.

For the simulated robot it worked with this input:

# Gripper->base translations for the simulated robot, one [x, y, z]
# entry per calibration image (presumably metres -- TODO confirm units).
demo_positions = [[0.5225, 0.712, 0],
                  [0.5225, 0.8, 0],
                  [0.5, 0.8, 0],
                  [0.55, 0.8, 0],
                  [0.5225, 0.8, 0.1],
                  [0.5, 0.8, 0.1],
                  [0.55, 0.8, -0.1],
                  [0.5225, 0.8, -0.1],
                  [0.55, 0.3, -0.45], 
                  [0.2, 0.5, -0.4]]
# Matching gripper->base rotations, each a 3x3 rotation matrix flattened
# row-major into 9 values, in the same order as demo_positions.
demo_rotations = [[0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                  [0,  0, -1,
                   -0.5555702,  0.8314696, -0,
                   0.8314696,  0.5555702,  0],
                  [0.5,  0.5, -0.7071068,
                   -0.7071068,  0.7071068,  0,
                   0.5,  0.5,  0.7071068]
                  ]

But it did not work for the real robot:

# Gripper->base translations for the real robot, one [x, y, z] entry per
# calibration image (presumably metres -- TODO confirm units).
# NOTE(review): 13 of the 14 poses share the same orientation below;
# hand-eye calibration needs substantial rotation variation between
# poses to be well conditioned -- consider adding more rotated poses.
demo_positions = [[0.465, 0.0005, 0.5768],
           [0.465, 0.0005, 0.6641],
           [0.465, 0.0005, 0.5144],
           [0.4651, -0.089, 0.5765],
           [0.4651, -0.0578, 0.5765],
           [0.4651, 0.0477, 0.5765],
           [0.4651, 0.0633, 0.5765],
           [0.4814, 0.0003, 0.5765],
           [0.4376, 0.0003, 0.5765],
           [0.4376, 0.0628, 0.5765],
           [0.4466, -0.0827, 0.5765],
           [0.4729, -0.0827, 0.5765],
           [0.4729, 0.059, 0.5765],
            [0.4856, -0.0853, 0.7126]]

# Matching orientations as Euler angles in degrees, as reported by the
# robot controller (axis order/convention unverified -- see the
# conversion loop below).
demo_rotations = [[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-90,0,89.9],
[-45,-0.1,90.2]]

rot_matrices = []

# Convert the real robot's Euler angles (degrees) into flattened 3x3
# rotation matrices, the same format the simulated data uses.
for angles in demo_rotations:
    angles_rad = np.radians(angles)

    # NOTE(review): 'zyx' (intrinsic) interprets the FIRST angle as the
    # Z rotation.  If the controller reports roll-pitch-yaw about fixed
    # x, y, z axes, the correct sequence would be extrinsic 'xyz', and
    # this mismatch alone corrupts every gripper pose fed into
    # calibrateHandEye -- confirm the robot's Euler convention.
    rotation = R.from_euler('zyx', angles_rad, degrees=False)

    # Flatten row-major to match the 9-value format used above.
    rotation_matrix = rotation.as_matrix().flatten().tolist()

    rot_matrices.append(rotation_matrix)

demo_rotations = rot_matrices

Then I call the function like this:

# findpose() calibrates the intrinsics from the on-disk images and then
# runs hand-eye calibration against the robot poses prepared above.
mtx, dist, R_cam2gripper, t_cam2gripper = pose_estimation.findpose(True, demo_positions, demo_rotations)
print(t_cam2gripper)

For the simulated one I got: [[ 0.24499824] [-0.18012203] [-0.04774245]]
but for the real one I got: [-0.34161192] [-0.19377119] [ 0.00266968],
which is very wrong, because it should be roughly 0 0 -0.10 given that the setup is this:

But sometimes I also got results like this:
[[ 5.71532296e+12] [-1.77540433e+12] [ 8.51635063e+14]]

And this is the code for the calculations:

import cv2 as cv
import numpy as np
import glob

# Inner-corner counts of the checkerboard pattern (not square counts).
CHECKERBOARD_WIDTH = 9
CHECKERBOARD_HEIGHT = 14
# Edge length of one checkerboard square, presumably metres -- the
# hand-eye translation comes out in these units.
SQUARE_SIZE = 0.017
# cornerSubPix termination: stop after 30 iterations or once the corner
# estimate moves by less than 0.001 px.
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)


def objpgen(middle):
    """Build the 3-D object points of the checkerboard corners.

    Points lie in the board plane (z = 0) on a regular grid scaled by
    SQUARE_SIZE.  When `middle` is true the origin is shifted to the
    centre of the pattern instead of its first corner.
    """
    grid = np.mgrid[0:CHECKERBOARD_HEIGHT, 0:CHECKERBOARD_WIDTH].T.reshape(-1, 2)
    objp = np.zeros((CHECKERBOARD_HEIGHT * CHECKERBOARD_WIDTH, 3), np.float32)
    objp[:, :2] = grid

    if middle:
        # Vectorised centre shift (equivalent to the per-point loop).
        objp[:, 0] -= (CHECKERBOARD_HEIGHT - 1) / 2
        objp[:, 1] -= (CHECKERBOARD_WIDTH - 1) / 2

    objp *= SQUARE_SIZE
    return objp


def prepareimage(img):
    """Binarise a BGR image to make the checkerboard easier to detect.

    Thresholds in HSV space, dilates the mask with a large rectangular
    kernel, and returns the inverted AND of the dilated and raw masks
    as an 8-bit image.
    """
    hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    lower_bound = np.array([0, 0, 0])
    upper_bound = np.array([255, 100, 150])
    mask = cv.inRange(hsv_img, lower_bound, upper_bound)

    kernel = cv.getStructuringElement(cv.MORPH_RECT, (50, 30))
    dilated = cv.dilate(mask, kernel, iterations=5)

    return np.uint8(255 - cv.bitwise_and(dilated, mask))


# === CALIBRATE CAMERA ============================================================================

def calibrate(middle):
    """Calibrate camera intrinsics from all checkerboard *.png images.

    Args:
        middle: forwarded to objpgen(); when True the board origin is
            placed at the centre of the pattern instead of a corner.

    Returns:
        (ret, mtx, dist, rvecs, tvecs) as produced by cv.calibrateCamera.

    Raises:
        RuntimeError: if no image contains a detectable checkerboard.
    """
    # Prepare object points
    objp = objpgen(middle)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    # BUG FIX: glob.glob() returns files in arbitrary, platform-dependent
    # order; sort so the processing order is deterministic and matches
    # any externally supplied per-image data.
    images = sorted(img for img in glob.glob('*.png') if not img.startswith('~'))

    image_size = None
    # Process the images
    for fname in images:
        img = cv.imread(fname)
        imgbw = prepareimage(img)
        image_size = imgbw.shape[::-1]  # (width, height) for calibrateCamera
        # Find the chess board corners
        ret, corners = cv.findChessboardCornersSB(imgbw, (CHECKERBOARD_HEIGHT, CHECKERBOARD_WIDTH), None)
        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

    # BUG FIX: an empty directory previously crashed with an opaque
    # NameError/cv error; fail with a clear message instead.
    if not objpoints:
        raise RuntimeError('calibrate: no checkerboard detected in any *.png image')

    # Obtain camera parameters
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, image_size, None, None)
    return ret, mtx, dist, rvecs, tvecs


# === FIND POSE OF TARGETS ===========================================================================
def findpose(middle, camerat, camerar):
    """Estimate the camera-to-gripper transform via hand-eye calibration.

    Args:
        middle: forwarded to calibrate()/objpgen().
        camerat: gripper->base translations, one [x, y, z] per image,
            ordered to match the SORTED *.png filenames on disk.
        camerar: gripper->base rotations, one flattened row-major 3x3
            matrix (9 values) per image, same ordering as camerat.

    Returns:
        (mtx, dist, R_cam2gripper, t_cam2gripper).
    """
    R_target2cam = []   # board pose rotations in the camera frame
    t_target2cam = []   # board pose translations in the camera frame
    R_gripper2base = []
    t_gripper2base = []

    ret, mtx, dist, rvecs, tvecs = calibrate(middle)
    # Prepare object points
    objp = objpgen(middle)

    # BUG FIX: glob.glob() returns files in arbitrary, platform-dependent
    # order, so image index idx did not reliably correspond to robot pose
    # camerat[idx]/camerar[idx].  Mismatched image/pose pairs make
    # calibrateHandEye return garbage (e.g. translations of ~1e12).
    # Sorting restores a deterministic correspondence.
    images = sorted(img for img in glob.glob('*.png') if not img.startswith('~'))
    for idx, fname in enumerate(images):
        img = cv.imread(fname)
        imgbw = prepareimage(img)
        ret, corners = cv.findChessboardCornersSB(imgbw, (CHECKERBOARD_HEIGHT, CHECKERBOARD_WIDTH), None)
        if ret:
            corners2 = cv.cornerSubPix(imgbw, corners, (11, 11), (-1, -1), criteria)
            # Board pose relative to the camera (target -> camera).
            ret, rvecs, tvecs = cv.solvePnP(objp, corners2, mtx, dist)

            # BUG FIX: pass the real distortion coefficients (was None)
            # so the drawn axes line up with the detected board.
            img = cv.drawFrameAxes(img, mtx, dist, rvecs, tvecs, SQUARE_SIZE, 3)
            cv.imshow('Pose', img)
            cv.waitKey()

            # Robot pose for this image: row-major 3x3 rotation and a
            # 3x1 translation column vector (gripper -> base).
            R_gripper2base.append(np.asarray(camerar[idx], dtype=np.float64).reshape(3, 3))
            t_gripper2base.append(np.asarray(camerat[idx], dtype=np.float64).reshape(3, 1))

            rmatrix, _ = cv.Rodrigues(rvecs)
            R_target2cam.append(rmatrix)
            t_target2cam.append(tvecs)

    R_cam2gripper, t_cam2gripper = cv.calibrateHandEye(R_gripper2base, t_gripper2base,
                                                       R_target2cam, t_target2cam)
    # BUG FIX: return the real distortion coefficients -- the caller
    # unpacks this slot as `dist`, but it was hard-coded to None.
    return mtx, dist, R_cam2gripper, t_cam2gripper

I uploaded it to GitHub too:

I really can't find what is wrong with the real-robot case.
Please help me out if you can.