I’m trying to recover the real-world coordinates of points lying on a plane from a photo. The camera is calibrated and the distortion coefficients are known. I set the coordinate origin with an ArUco marker. As a sanity check, I compute the world coordinates of the marker’s own corners, and the result looks strange.
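For context, mtx, dist and newcameramtx in the code below come out of a standard OpenCV calibration step, roughly like this (the chessboard size and file names here are only placeholders, not my actual setup):

import glob
import cv2
import numpy as np

# Placeholder calibration sketch: a 9x6 chessboard and images named calib_*.jpg
pattern = (9, 6)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for fname in glob.glob("calib_*.jpg"):
    gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
    found, cbCorners = cv2.findChessboardCorners(gray, pattern)
    if found:
        objpoints.append(objp)
        imgpoints.append(cbCorners)

_, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
h, w = gray.shape
newcameramtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))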
Here’s my code:
import cv2
import numpy as np

# img, mtx (camera matrix), dist (distortion coefficients) and newcameramtx
# (from cv2.getOptimalNewCameraMatrix) are loaded / computed earlier.

markerLenMm = 50.0

# Marker corner coordinates in the real world (the marker plane is Z = 0)
objPoints = np.array([
    (0, 0, 0),
    (markerLenMm, 0, 0),
    (markerLenMm, -markerLenMm, 0),
    (0, -markerLenMm, 0)
], dtype=np.float32)

# Detect the marker on the original (distorted) image
detectorParameters = cv2.aruco.DetectorParameters()
detectorParameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
detector = cv2.aruco.ArucoDetector(cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_50),
                                   detectorParameters)
(corners, _, _) = detector.detectMarkers(img)

# Estimate the marker pose relative to the camera
_, rvec, tvec = cv2.solvePnP(objPoints, corners[0], mtx, dist, flags=cv2.SOLVEPNP_IPPE_SQUARE)
R, _ = cv2.Rodrigues(rvec)
Rt = np.column_stack((R, tvec))
P_mtx = newcameramtx.dot(Rt)  # 3x4 projection matrix

# Scaling factor, taken from one known corner (corner 2)
XYZ1 = np.array([[objPoints[2, 0], objPoints[2, 1], objPoints[2, 2], 1]], dtype=np.float32).T
s = P_mtx.dot(XYZ1)[2, 0]

# Undistort the image and detect the marker again on it
img = cv2.undistort(img, mtx, dist, None, newcameramtx)
(corners, _, _) = detector.detectMarkers(img)
cv2.aruco.drawDetectedMarkers(img, corners)

# Back-project every detected corner to world coordinates
for corner in corners[0][0]:
    cam_point = np.array([[corner[0], corner[1], 1]], dtype=np.float32)
    world_point = np.linalg.inv(R).dot(np.linalg.inv(newcameramtx).dot(s * cam_point.T) - tvec)
    print(world_point)
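For reference, the relation I’m assuming in the loop above is the usual pinhole model on the undistorted image, with newcameramtx as the camera matrix:

    s * [u, v, 1]^T = newcameramtx * (R * X_world + tvec)

which I invert as

    X_world = R^-1 * (newcameramtx^-1 * (s * [u, v, 1]^T) - tvec)

where s is computed once from the known third corner and reused for every pixel.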
The result is:
[[-0.0263353]
[ 0.64506942]
[-6.52279113]]
[[50.16659473]
[ 0.46985561]
[-4.42295059]]
[[ 5.00555960e+01]
[-5.00240551e+01]
[ 4.40749216e-03]]
[[ -0.05097593]
[-49.85405108]
[ -2.09138941]]
The X and Y coordinates are close to the expected 0 / ±50 mm values, but Z, which should be 0 everywhere on the marker plane, is off by several millimeters for most corners. What could be the reason?