In C++, cv::calibrateCamera doesn't converge to the right solution, but in Python it does

Hello,

I'm working on calibrating a camera in C++, but it isn't working. The resulting intrinsic matrix K bears no relation to the camera's actual values, with an RMSE (root mean square error) of approximately 200.0. However, using the same images in Python, calibration works with an RMSE of 0.05.

I'm working on Windows.
C++:
opencv 4.8.0 from vcpkg

Python:
opencv-python 4.8.72

C++ code:

// calibration pattern parameters and criteria
const int rows=11;
const int columns=8;
const cv::Size tableSize(rows, columns);
const float world_scaling=15.0;
const cv::Size conv_size(11,11);
const cv::TermCriteria criteria(cv::TermCriteria::EPS | cv::TermCriteria::MAX_ITER, 100, 1.0E-5);

// 3D coordinates of the chessboard corners on the z = 0 plane
std::vector<cv::Point3f> objp(rows * columns);
for (size_t i = 0; i < rows; ++i) {
	for (size_t j = 0; j < columns; ++j) {
		objp[i * columns + j] = cv::Point3f(j * world_scaling, i * world_scaling, 0.0);
	}
}
auto images = readImg("../data/cam1"); // helper that loads all calibration images from the folder
const int width = images[0].cols;
const int height = images[0].rows;
// objPoints1: std::vector<std::vector<cv::Point3f>>, imgPoints1: std::vector<std::vector<cv::Point2f>>
objPoints1.clear();
imgPoints1.clear();
for (size_t i = 0; i < images.size(); i++)
{
	std::vector<cv::Point2f> corners1;
	bool found_1 = cv::findChessboardCorners(images[i], tableSize, corners1,cv::CALIB_CB_ADAPTIVE_THRESH);
	if (found_1) {
		cv::cornerSubPix(images[i], corners1, conv_size, cv::Size(-1, -1), criteria);
		objPoints1.push_back(objp);
		imgPoints1.push_back(corners1);
	}
}

// Calibrate each camera individually
// Camera 1
cv::Mat cameraMatrix1;
cv::Mat distCoeffs1;
std::vector<cv::Mat> rvecs1, tvecs1;
const cv::Size size(width, height);

double ret = cv::calibrateCamera(objPoints1, imgPoints1, size,
								 cameraMatrix1, distCoeffs1, rvecs1, tvecs1);
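
For reference, this is roughly how I inspect the result afterwards (a minimal sketch, assuming the <iostream> header is included; the numbers I quoted above come from ret and cameraMatrix1):

// Print the calibration result for inspection
std::cout << "RMS reprojection error: " << ret << std::endl;    // ~200 in C++, ~0.05 in Python
std::cout << "K =\n" << cameraMatrix1 << std::endl;             // nothing like the camera's real intrinsics
std::cout << "dist =\n" << distCoeffs1 << std::endl;
std::cout << "views used: " << objPoints1.size() << std::endl;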

Python code:

objp = np.zeros((self.rows * self.columns, 3), np.float32)
objp[:, :2] = np.mgrid[0:self.rows, 0:self.columns].T.reshape(-1, 2)
objp = self.world_scaling * objp

# Pixel coordinates of checkerboards
imgpoints_l = []  # 2d points in image plane
objpoints_l = []  # 3d points in real world space
for img_l, img_r in zip(self.Cam_left.list_images, self.Cam_right.list_images):
    found_l, corners_l = cv.findChessboardCorners(img_l, (self.rows, self.columns), flags=cv.CALIB_CB_ADAPTIVE_THRESH)
    if found_l:
        corners_l = cv.cornerSubPix(img_l, corners_l, self.conv_size, (-1, -1), self.criteria)
        objpoints_l.append(objp)
        imgpoints_l.append(corners_l)

ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints_l, imgpoints_l, (width, height), None, None)

Could someone help me, please? I don't know what I'm doing wrong.