Right, here is a more detailed description of the problem.
void opencvExemple()
{
std::cout << "solvePnP forum opencv \n";
// input objectPoints
std::vector<cv::Point3d> objectPoints;
double marker_size = 0.04; // [m]
double half_size = marker_size /2;
double markers_relative_lateral_distance = 0.2; // [m]
objectPoints.push_back(cv::Point3d(-markers_relative_lateral_distance , -half_size, 1.0));
objectPoints.push_back(cv::Point3d(-markers_relative_lateral_distance + marker_size, -half_size, 1.0));
objectPoints.push_back(cv::Point3d(-markers_relative_lateral_distance + marker_size, half_size , 1.0));
objectPoints.push_back(cv::Point3d(-markers_relative_lateral_distance , half_size , 1.0));
objectPoints.push_back(cv::Point3d( markers_relative_lateral_distance - marker_size, -half_size, 1.0));
objectPoints.push_back(cv::Point3d( markers_relative_lateral_distance , -half_size, 1.0));
objectPoints.push_back(cv::Point3d( markers_relative_lateral_distance , half_size , 1.0));
objectPoints.push_back(cv::Point3d( markers_relative_lateral_distance - marker_size, half_size , 1.0));
// input imagePoints
std::vector<cv::Point2d> imagePoints;
imagePoints.push_back(cv::Point2d(197, 202));
imagePoints.push_back(cv::Point2d(212, 202));
imagePoints.push_back(cv::Point2d(212, 218));
imagePoints.push_back(cv::Point2d(197, 218));
imagePoints.push_back(cv::Point2d(285, 204));
imagePoints.push_back(cv::Point2d(301, 204));
imagePoints.push_back(cv::Point2d(301, 220));
imagePoints.push_back(cv::Point2d(285, 220));
// intrinsic matrix
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 401.0513916015625, 0.0, 326.7859191894531,
0.0, 401.064453125, 206.62399291992188,
0.0, 0.0, 1.0);
// clang-format on
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
cv::solvePnPGeneric(objectPoints,
imagePoints,
cameraMatrix,
cv::Mat(),
rvecs,
tvecs,
false,
cv::SOLVEPNP_IPPE);
displayMat(rvecs.front());
displayMat(rvecs.back());
}
/// Prints a matrix's dimensions followed by its elements, one row per line.
/// NOTE(review): reads elements with at<double>, i.e. this assumes the
/// matrix depth is CV_64F — confirm against callers before reuse.
void displayMat(const cv::Mat &matrix)
{
    std::cout << "Matrix (" << matrix.rows << "x" << matrix.cols << "):\n";
    int row = 0;
    while (row < matrix.rows) {
        int col = 0;
        while (col < matrix.cols) {
            // Trailing space after each value, matching the original layout.
            std::cout << matrix.at<double>(row, col) << " ";
            ++col;
        }
        std::cout << std::endl;
        ++row;
    }
}
The resulting rvecs are:
rvec = [0.00712227 , 0.724407 , 0.00441881];
norm(rvec) = 0.72rad = 41deg
rvec = [ -0.0290932 , 1.11246 ,0.0110151 ];
norm(rvec) = 1.11rad = 63deg
From what I understand, since objectPoints and imagePoints share the same layout and the same ordering, we can expect the rotation to be null or very close to zero (or possibly one axis close to pi/2, depending on how the axes are defined). So I would expect an rvec with values like:
rvec_expected = [~0.0, ~0.0, ~0.0];
rvec_expected = [~1.57, ~0.0, ~0.0];