diff --git a/modules/aruco/include/opencv2/aruco.hpp b/modules/aruco/include/opencv2/aruco.hpp
index 32d011404fe..fac4dee81d4 100644
--- a/modules/aruco/include/opencv2/aruco.hpp
+++ b/modules/aruco/include/opencv2/aruco.hpp
@@ -40,6 +40,7 @@ the use of this software, even if advised of the possibility of such damage.
 #define __OPENCV_ARUCO_HPP__
 
 #include <opencv2/core.hpp>
+#include <opencv2/calib3d.hpp>
 #include <vector>
 #include "opencv2/aruco/dictionary.hpp"
 
@@ -243,7 +244,55 @@ struct CV_EXPORTS_W DetectorParameters {
 CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
                                 OutputArray ids, const Ptr<DetectorParameters> &parameters = DetectorParameters::create(),
                                 OutputArrayOfArrays rejectedImgPoints = noArray());
 
+/** @brief
+ * rvec/tvec define the right-handed coordinate system of the marker.
+ * PatternPos defines the center of this system and the direction of the axes.
+ * Axis X (red color) - first coordinate, axis Y (green color) - second coordinate,
+ * axis Z (blue color) - third coordinate.
+ * @sa estimatePoseSingleMarkers(), @ref tutorial_aruco_detection
+ */
+enum PatternPos {
+    /** @brief The marker coordinate system is centered on the middle of the marker.
+     * The coordinates of the four corners (CCW order) of the marker in its own coordinate system are:
+     * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
+     * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0).
+     *
+     * These pattern points define this coordinate system:
+     * ![Image with axes drawn](images/singlemarkersaxes.jpg)
+     */
+    CCW_center,
+    /** @brief The marker coordinate system is centered on the top-left corner of the marker.
+     * The coordinates of the four corners (CW order) of the marker in its own coordinate system are:
+     * (0, 0, 0), (markerLength, 0, 0),
+     * (markerLength, markerLength, 0), (0, markerLength, 0).
+     *
+     * These pattern points define this coordinate system:
+     * ![Image with axes drawn](images/singlemarkersaxes2.jpg)
+     */
+    CW_top_left_corner
+};
+
+/** @brief
+ * Pose estimation parameters
+ * @param pattern Defines the center of the coordinate system and the direction of the axes (default PatternPos::CCW_center).
+ * @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided
+ * rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further
+ * optimizes them (default false).
+ * @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE).
+ * @sa PatternPos, solvePnP(), @ref tutorial_aruco_detection
+ */
+struct CV_EXPORTS_W EstimateParameters {
+    CV_PROP_RW PatternPos pattern;
+    CV_PROP_RW bool useExtrinsicGuess;
+    CV_PROP_RW SolvePnPMethod solvePnPMethod;
+
+    EstimateParameters(): pattern(CCW_center), useExtrinsicGuess(false),
+                          solvePnPMethod(SOLVEPNP_ITERATIVE) {}
+    CV_WRAP static Ptr<EstimateParameters> create() {
+        return makePtr<EstimateParameters>();
+    }
+};
 
 
 /**
@@ -264,21 +313,28 @@ CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &diction
 * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
 * Each element in tvecs corresponds to the specific marker in imgPoints.
 * @param _objPoints array of object points of all the marker corners
+ * @param estimateParameters sets the origin of the coordinate system and the coordinates of the four corners of the marker
+ * (default estimateParameters.pattern = PatternPos::CCW_center, estimateParameters.useExtrinsicGuess = false,
+ * estimateParameters.solvePnPMethod = SOLVEPNP_ITERATIVE).
 *
 * This function receives the detected markers and returns their pose estimation respect to
 * the camera individually. So for each marker, one rotation and translation vector is returned.
 * The returned transformation is the one that transforms points from each marker coordinate system
 * to the camera coordinate system.
- * The marker corrdinate system is centered on the middle of the marker, with the Z axis
- * perpendicular to the marker plane.
- * The coordinates of the four corners of the marker in its own coordinate system are:
- * (0, 0, 0), (markerLength, 0, 0),
- * (markerLength, markerLength, 0), (0, markerLength, 0)
+ * The marker coordinate system is centered on the middle (by default) or on the top-left corner of the marker,
+ * with the Z axis perpendicular to the marker plane.
+ * By default, estimateParameters defines the coordinates of the four corners of the marker in its own coordinate system to be:
+ * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
+ * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 * @sa use cv::drawFrameAxes to get world coordinate system axis for object points
+ * @sa @ref tutorial_aruco_detection
+ * @sa EstimateParameters
+ * @sa PatternPos
 */
CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
                                            InputArray cameraMatrix, InputArray distCoeffs,
-                                            OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray());
+                                            OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray(),
+                                            Ptr<EstimateParameters> estimateParameters = EstimateParameters::create());
diff --git a/modules/aruco/src/aruco.cpp b/modules/aruco/src/aruco.cpp
index f52fef52aad..d7eabd776d2 100644
--- a/modules/aruco/src/aruco.cpp
+++ b/modules/aruco/src/aruco.cpp
@@ -784,19 +784,31 @@ static void _identifyCandidates(InputArray grey,
 
 
 /**
- * @brief Return object points for the system centered in a single marker, given the marker length
+ * @brief Return object points for the system centered in the middle (by default) or in the top-left corner of a single
+ * marker, given the marker length
 */
-static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints) {
+static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints,
+                                         EstimateParameters estimateParameters) {
 
    CV_Assert(markerLength > 0);
 
    _objPoints.create(4, 1, CV_32FC3);
    Mat objPoints = _objPoints.getMat();
    // set coordinate system in the top-left corner of the marker, with Z pointing out
-    objPoints.ptr< Vec3f >(0)[0] = Vec3f(0.f, 0.f, 0);
-    objPoints.ptr< Vec3f >(0)[1] = Vec3f(markerLength, 0.f, 0);
-    objPoints.ptr< Vec3f >(0)[2] = Vec3f(markerLength, markerLength, 0);
-    objPoints.ptr< Vec3f >(0)[3] = Vec3f(0.f, markerLength, 0);
+    if (estimateParameters.pattern == CW_top_left_corner) {
+        objPoints.ptr<Vec3f>(0)[0] = Vec3f(0.f, 0.f, 0);
+        objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength, 0.f, 0);
+        objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength, markerLength, 0);
+        objPoints.ptr<Vec3f>(0)[3] = Vec3f(0.f, markerLength, 0);
+    }
+    else if (estimateParameters.pattern == CCW_center) {
+        objPoints.ptr<Vec3f>(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0);
+        objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0);
+        objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0);
+        objPoints.ptr<Vec3f>(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
+    }
+    else
+        CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern");
 }
 
@@ -1208,12 +1220,13 @@ void detectMarkers(InputArray _image, const Ptr<Dictionary> &_dictionary, Output
 */
void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,
                               InputArray _cameraMatrix, InputArray _distCoeffs,
-                               OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints) {
+                               OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints,
+                               Ptr<EstimateParameters> estimateParameters) {
 
    CV_Assert(markerLength > 0);
 
    Mat markerObjPoints;
-    _getSingleMarkerObjectPoints(markerLength, markerObjPoints);
+    _getSingleMarkerObjectPoints(markerLength, markerObjPoints, *estimateParameters);
    int nMarkers = (int)_corners.total();
    _rvecs.create(nMarkers, 1, CV_64FC3);
    _tvecs.create(nMarkers, 1, CV_64FC3);
diff --git a/modules/aruco/test/test_charucodetection.cpp b/modules/aruco/test/test_charucodetection.cpp
index 08e289b99db..91b4002d701 100644
--- a/modules/aruco/test/test_charucodetection.cpp
+++ b/modules/aruco/test/test_charucodetection.cpp
@@ -439,10 +439,12 @@ void CV_CharucoDiamondDetection::run(int) {
            }
        }
 
+        Ptr<aruco::EstimateParameters> estimateParameters = aruco::EstimateParameters::create();
+        estimateParameters->pattern = aruco::CW_top_left_corner;
        // estimate diamond pose
        vector< Vec3d > estimatedRvec, estimatedTvec;
-        aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix,
-                                         distCoeffs, estimatedRvec, estimatedTvec);
+        aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix, distCoeffs, estimatedRvec,
+                                         estimatedTvec, noArray(), estimateParameters);
 
        // check result
        vector< Point2f > projectedDiamondCornersPose;
diff --git a/modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg b/modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg
new file mode 100644
index 00000000000..dc8edee15d9
Binary files /dev/null and b/modules/aruco/tutorials/aruco_board_detection/images/singlemarkersaxes2.jpg differ
diff --git a/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown b/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown
index ee0518b0c53..2e81d09b1f9 100644
--- a/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown
+++ b/modules/aruco/tutorials/aruco_detection/aruco_detection.markdown
@@ -286,8 +286,9 @@ translation vectors of the estimated poses will be in the same unit
 - The output parameters `rvecs` and `tvecs` are the rotation and translation vectors respectively,
 for each of the markers in `markerCorners`.
 
-The marker coordinate system that is assumed by this function is placed at the center of the marker
-with the Z axis pointing out, as in the following image. Axis-color correspondences are X: red, Y: green, Z: blue. Note the axis directions of the rotated markers in this image.
+The marker coordinate system that is assumed by this function is placed in the center (by default) or
+in the top left corner of the marker with the Z axis pointing out, as in the following image.
+Axis-color correspondences are X: red, Y: green, Z: blue. Note the axis directions of the rotated markers in this image.
 
 ![Image with axes drawn](images/singlemarkersaxes.jpg)
diff --git a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc
index 1a1568da831..1d19978937b 100644
--- a/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc
+++ b/modules/sfm/src/libmv_light/libmv/simple_pipeline/bundle.cc
@@ -24,6 +24,7 @@
 
 #include "ceres/ceres.h"
 #include "ceres/rotation.h"
+#include "ceres/version.h"
 #include "libmv/base/vector.h"
 #include "libmv/logging/logging.h"
 #include "libmv/multiview/fundamental.h"
@@ -485,7 +486,11 @@ void EuclideanBundleCommonIntrinsics(
   PackCamerasRotationAndTranslation(tracks, *reconstruction);
 
   // Parameterization used to restrict camera motion for modal solvers.
+#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1)
+  ceres::SubsetManifold *constant_translation_manifold = NULL;
+#else
   ceres::SubsetParameterization *constant_translation_parameterization = NULL;
+#endif
   if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
     std::vector<int> constant_translation;
 
@@ -494,8 +499,13 @@ void EuclideanBundleCommonIntrinsics(
     constant_translation.push_back(4);
     constant_translation.push_back(5);
 
+#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1)
+    constant_translation_manifold =
+        new ceres::SubsetManifold(6, constant_translation);
+#else
     constant_translation_parameterization =
         new ceres::SubsetParameterization(6, constant_translation);
+#endif
   }
 
   // Add residual blocks to the problem.
@@ -538,8 +548,13 @@ void EuclideanBundleCommonIntrinsics(
       }
 
       if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
+#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1)
+        problem.SetManifold(current_camera_R_t,
+                            constant_translation_manifold);
+#else
         problem.SetParameterization(current_camera_R_t,
                                     constant_translation_parameterization);
+#endif
       }
 
       zero_weight_tracks_flags[marker.track] = false;
@@ -586,10 +601,17 @@ void EuclideanBundleCommonIntrinsics(
     // Always set K3 constant, it's not used at the moment.
     constant_intrinsics.push_back(OFFSET_K3);
 
+#if CERES_VERSION_MAJOR >= 3 || (CERES_VERSION_MAJOR >= 2 && CERES_VERSION_MINOR >= 1)
+    ceres::SubsetManifold *subset_manifold =
+        new ceres::SubsetManifold(OFFSET_MAX, constant_intrinsics);
+
+    problem.SetManifold(ceres_intrinsics, subset_manifold);
+#else
     ceres::SubsetParameterization *subset_parameterization =
         new ceres::SubsetParameterization(OFFSET_MAX, constant_intrinsics);
 
     problem.SetParameterization(ceres_intrinsics, subset_parameterization);
+#endif
   }
 
   // Configure the solver.