I am detecting ArUco tags in an image and then doing a pose estimation on a Raspberry Pi. Minimal code might look like this:
#include <unistd.h>

#include <iostream>
#include <vector>

#include <opencv2/aruco.hpp>
#include <opencv2/opencv.hpp>
// Build the ArUco detector parameters used for marker detection.
// Subpixel corner refinement is enabled for more accurate pose estimation.
Ptr<aruco::DetectorParameters> aruco_param() {
  Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
  // Accept candidates whose perimeter is between 4% and 100% of the
  // larger image dimension (rates are relative to the image size).
  params->minMarkerPerimeterRate = 0.04;
  params->maxMarkerPerimeterRate = 1.0;
  params->cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX;
  return params;
}
int main(int argc, char *argv[]) {
Ptr<aruco::DetectorParameters> param = aruco_param();
Ptr<aruco::Dictionary> dict =
aruco::getPredefinedDictionary(aruco::DICT_4X4_100);
Mat mtx = (Mat_<double>(3, 3) << 1.23424578e+03, 0.00000000e+00,
1.27563061e+03, 0.00000000e+00, 1.23424578e+03, 1.02267452e+03,
0.00000000e+00, 0.00000000e+00, 1.00000000e+00);
Mat dist = (Mat_<double>(1, 14) << 2.48388910e-01, 8.96432397e-03,
4.60522564e-04, 2.62749266e-04, 9.94878947e-04, 5.96768987e-01,
2.89157221e-02, 4.36330487e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00);
cv::VideoCapture cap(0);
cap.set(CAP_PROP_FRAME_WIDTH, 2592);
cap.set(CAP_PROP_FRAME_HEIGHT, 1944);
cap.set(CAP_PROP_AUTO_EXPOSURE, 0.25);
cap.set(CAP_PROP_EXPOSURE, 80);
usleep(1 * 1000 * 1000);
while (true) {
Mat image;
cap >> image;
cvtColor(image, image, COLOR_BGR2GRAY);
extractChannel(image, image, 0);
vector<int> ids;
vector<vector<Point2f>> corners, rejected;
vector<Vec3d> rvecs, tvecs;
aruco::detectMarkers(image, dict, corners, ids, param, rejected);
if(ids.size() > 0){
aruco::estimatePoseSingleMarkers(corners, 0.05f, mtx, dist, rvecs, tvecs);
for (int i = 0; i < ids.size(); i++)
{
cout << ids[i] << ";" << tvecs[i] << ";" << rvecs[i] << endl;
}
}
}
And it mostly works. Now, tvecs (and rvecs) are expressed in the camera coordinate system (the unit of tvecs is meters). I don't really care about the camera coordinate system; instead, I want to use tag 0 as the origin of my coordinates (I want to express all points in the tag-0 coordinate system). (To be honest, I don't care about tag 0 itself, but about a point in the same plane, at (x, y) = (-0.58, -0.58) — but I think I can figure that part out by myself.)
I haven't worked with matrices for a while, and I'm a bit confused by the rvecs format.
I think I can get the rotation matrix using cv::Rodrigues, and my translation vector should not be hard to compute:
// estimatePoseSingleMarkers returns the tag -> camera transform:
//   p_cam = R * p_tag + tvec,   with R = Rodrigues(rvec).
// To express a camera-frame point in the tag-0 frame, invert that transform:
//   p_tag0 = R0^T * (p_cam - tvec0)
// (the original computed R^T * tvec - tvec, which mixes the two frames).
cv::Mat R0;
cv::Rodrigues(rvecs[0], R0);
cv::Mat R0_inv = R0.t();            // inverse of a rotation is its transpose
cv::Mat t0 = cv::Mat(tvecs[0]);     // 3x1 CV_64F translation of tag 0

// Any camera-frame point can now be mapped into the tag-0 frame, e.g. the
// center of marker i:  R0_inv * (cv::Mat(tvecs[i]) - t0).
// Sanity check: tag 0's own center maps to (0, 0, 0) in its own frame.
cv::Mat new_pos = R0_inv * (cv::Mat(tvecs[0]) - t0);
Could someone help me out with that last part?
(You can assume that tvecs[0] contains the pose of tag 0, even if that is not guaranteed in general.)