0

I'm trying to estimate the distance between two ArUco markers using the standard Euclidean norm. I calibrated the camera with a 9x6 chessboard whose squares are 25 mm on a side. The markers have a size of 10 cm.

Then the program detects 2 markers, calculates the tVecs and rVecs of both of them, and draws all the lines. At first, I thought the model was that the camera "shoots" tVecs at the markers, so the program should calculate the distance simply as the length of the difference between the two markers' tVecs, but the results were wrong. Then I read that in the model used by OpenCV the tVecs "shoot" towards the camera and the markers are the centers of their coordinate systems. So I should invert the perspective and compose the tVecs (because of the angle issue) and compute the length of the result. But this is not working properly either. I'm not sure, but some results of the estimatePoseSingleMarkers function are surprising, and I wonder if it's calculating the tVecs correctly. You can put your own calibration data path in main if you want to run this code.

import imutils
import cv2 as cv
from cv2 import aruco
import sys
import numpy as np
import glob
import math

# Physical side length of the printed marker. estimatePoseSingleMarkers returns
# tVecs in whatever unit this is given in, so all distances computed below are
# in the same unit (here: centimeters).
MARKER_SIZE = 10 #cm

# Human-readable names mapped to OpenCV's predefined ArUco dictionaries, so the
# dictionary can be chosen by string (see `aruco_type` in main).
ARUCO_DICT = {
    "DICT_4X4_50": cv.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv.aruco.DICT_6X6_250,
    "DICT_6X6_1000": cv.aruco.DICT_6X6_1000,
    "DICT_7X7_50": cv.aruco.DICT_7X7_50,
    "DICT_7X7_100": cv.aruco.DICT_7X7_100,
    "DICT_7X7_250": cv.aruco.DICT_7X7_250,
    "DICT_7X7_1000": cv.aruco.DICT_7X7_1000,
    "DICT_ARUCO_ORIGINAL": cv.aruco.DICT_ARUCO_ORIGINAL,
    "DICT_APRILTAG_16h5": cv.aruco.DICT_APRILTAG_16h5,
    "DICT_APRILTAG_25h9": cv.aruco.DICT_APRILTAG_25h9,
    "DICT_APRILTAG_36h10": cv.aruco.DICT_APRILTAG_36h10,
    "DICT_APRILTAG_36h11": cv.aruco.DICT_APRILTAG_36h11
}


def inversePerspective(rvec, tvec):
    """Invert a rigid transform given as (Rodrigues rvec, tvec).

    If (rvec, tvec) maps marker coordinates into the camera frame, the
    returned pair maps camera coordinates into the marker frame.

    Parameters
    ----------
    rvec : array-like, Rodrigues rotation vector (3 elements).
    tvec : array-like, translation vector (3 elements).

    Returns
    -------
    (invRvec, invTvec) : tuple of ndarrays — the inverse pose, with
        invTvec as a (3, 1) column vector.

    NOTE(review): the original used np.matrix, which NumPy deprecates;
    plain ndarrays give identical numerics and compose cleanly with
    cv.composeRT downstream.
    """
    R, _ = cv.Rodrigues(rvec)                     # 3x3 rotation matrix
    R_inv = R.T                                   # rotation inverse == transpose
    invTvec = -R_inv @ np.asarray(tvec, dtype=float).reshape(3, 1)  # t' = -R^T t
    invRvec, _ = cv.Rodrigues(R_inv)              # back to a Rodrigues vector
    return invRvec, invTvec

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """Express marker 1's pose in marker 2's coordinate frame.

    Both poses are camera-frame poses as returned by
    estimatePoseSingleMarkers. The second pose is inverted and composed
    with the first; the result is returned as a pair of (3, 1) column
    vectors (composed rvec, composed tvec).
    """
    r1 = rvec1.reshape((3, 1))
    t1 = tvec1.reshape((3, 1))
    r2 = rvec2.reshape((3, 1))
    t2 = tvec2.reshape((3, 1))

    # Inverse the second marker, the right one in the image
    invR2, invT2 = inversePerspective(r2, t2)

    composed = cv.composeRT(r1, t1, invR2, invT2)
    composedRvec = composed[0].reshape((3, 1))
    composedTvec = composed[1].reshape((3, 1))
    return composedRvec, composedTvec

if __name__ == '__main__':
    ############LOADING FILES######
    filename = "10.jpg"
    aruco_type = "DICT_5X5_100"

    arucoDict = cv.aruco.Dictionary_get(ARUCO_DICT[aruco_type])
    arucoParams = cv.aruco.DetectorParameters_create()
    image = cv.imread(filename)
    image = imutils.resize(image, width=1000)
    gray_frame = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

    ########CALIBRATION##############
    # NOTE(review): the camera matrix is in pixels; the unit of the tVecs
    # comes solely from MARKER_SIZE passed to estimatePoseSingleMarkers.
    calib_data_path = "C:\\....../MultiMatrix.npz" 
    calib_data = np.load(calib_data_path)
    camMat = calib_data["camMatrix"]
    dist = calib_data["distCoef"]
    camera_r_vectors = calib_data["rVector"]
    camera_t_vectors = calib_data["tVector"]

    #############DETECT###########
    marker_corners, marker_IDs, rejected = aruco.detectMarkers(gray_frame, arucoDict, parameters=arucoParams)
    # Guard: everything below (center[2], tVec[1], bottomLeft) assumes at
    # least two detected markers; bail out cleanly otherwise instead of
    # crashing with IndexError/NameError.
    if marker_IDs is None or marker_IDs.size < 2:
        sys.exit("Need at least 2 detected markers to measure a distance.")

    center = []
    rVec, tVec, _ = aruco.estimatePoseSingleMarkers(marker_corners, MARKER_SIZE, camMat, dist)
    total_markers = range(0, marker_IDs.size)
    for ids, corners, i in zip(marker_IDs, marker_corners, total_markers):
        ##################DRAWING MARKERS############
        corners = corners.reshape((4, 2))
        (topLeft, topRight, bottomRight, bottomLeft) = corners
        topRight = (int(topRight[0]), int(topRight[1]))
        bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
        topLeft = (int(topLeft[0]), int(topLeft[1]))
        bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))

        cv.line(image, topLeft, topRight, (0, 255, 0), 2)
        cv.line(image, topRight, bottomRight, (0, 255, 0), 2)
        cv.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
        cv.line(image, bottomLeft, topLeft, (0, 255, 0), 2)

        # Marker center = midpoint of the diagonal, stored as flat [x0, y0, x1, y1].
        centerX = int((topLeft[0] + bottomRight[0]) / 2.0)
        centerY = int((topLeft[1] + bottomRight[1]) / 2.0)
        cv.circle(image, (centerX, centerY), 4, (0, 0, 255), -1)
        center.append(centerX)
        center.append(centerY)

    ###############RESULTS#############
    cv.line(image, (int(center[0]), int(center[1])), (int(center[2]), int(center[3])), (0, 255, 0), 2)

    print("\n############# TVecs:\n", tVec)
    composedRvec, composedTvec = relativePosition(rVec[1], tVec[1], rVec[0], tVec[0])
    print("\n#############ComposedTvec:\n", composedTvec)
    # BUGFIX: composedTvec is the displacement of marker 1's origin expressed
    # in marker 0's frame, so the inter-marker distance is the norm of the
    # whole (3, 1) vector. The original `composedTvec[0][0]-composedTvec[1][0]`
    # subtracted the x-component from the y-component and took |x - y|.
    distcomp = np.linalg.norm(composedTvec)
    # Camera-frame estimate: length of the difference of the two tVecs.
    distnorm = np.linalg.norm(tVec[0][0] - tVec[1][0])
    # bottomLeft here is the last marker's corner from the loop above —
    # the label is anchored next to the second marker.
    cv.putText(image, f"c-dist:{round(distcomp,2)}  n-dist:{round(distnorm,2)}", bottomLeft, cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), 2, cv.LINE_AA)

    cv.imshow(filename[:-4], image)
    cv.waitKey(0)
Kos2
  • 1

0 Answers