I have two ArUco markers (10 mm × 10 mm). I want to get the position and orientation of Marker B w.r.t. Marker A, so Marker A will be taken as the origin. I have rotated Marker B 45° clockwise about the z-axis.
Markers: the left one is Marker A and the right one is Marker B.
This is the function that detects the markers and calculates the relative pose. Is my approach correct, and what can I do to improve the pose estimation?
def pose_esitmation(origFrame, aruco_dict_type, matrix_coefficients, distortion_coefficients):
    """Detect two ArUco markers and report the pose of marker B relative to marker A.

    Marker A (id 2) is treated as the origin frame; marker B (id 3) is the
    test marker.  Both poses are first estimated in the camera frame, then
    the relative transform is computed as:

        R_ab = R_a^T @ R_b
        t_ab = R_a^T @ (t_b - t_a)

    Parameters
    ----------
    origFrame : BGR or grayscale frame from the video stream.
    aruco_dict_type : predefined cv2.aruco dictionary id.
    matrix_coefficients : 3x3 intrinsic matrix of the calibrated camera.
    distortion_coefficients : distortion coefficients of the camera.

    Returns
    -------
    origFrame with the marker axes and id labels drawn on it.
    """
    ORIGIN_ID = 2
    TEST_MARKER_ID = 3
    MARKER_SIZE_MM = 10.0   # physical marker side length (10 mm x 10 mm)
    TEXT_SIZE = 0.8         # font scale for the id labels (was an undefined name)

    arucoDict = cv2.aruco.Dictionary_get(aruco_dict_type)
    arucoParams = cv2.aruco.DetectorParameters_create()
    # Sub-pixel corner refinement noticeably improves pose accuracy for
    # small markers.
    arucoParams.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

    rvec_A = tvec_A = rvec_B = tvec_B = None

    # Detection works on grayscale; convert only if the frame has channels.
    if len(origFrame.shape) == 3:
        gray = cv2.cvtColor(origFrame, cv2.COLOR_BGR2GRAY)
    else:
        gray = origFrame

    (corners, ids, rejected) = cv2.aruco.detectMarkers(
        gray, arucoDict, parameters=arucoParams)

    if len(corners) > 0:
        for i in range(len(ids)):
            # ids[i] is a 1-element array; compare as a plain scalar.
            marker_id = int(ids[i])
            if marker_id not in (ORIGIN_ID, TEST_MARKER_ID):
                continue
            c = corners[i].reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = c
            rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                corners[i], MARKER_SIZE_MM,
                matrix_coefficients, distortion_coefficients)
            # BUG FIX: the original passed the undefined name `cameraMatrix`
            # here (NameError); use the intrinsic-matrix parameter instead.
            cv2.drawFrameAxes(origFrame, matrix_coefficients,
                              distortion_coefficients, rvec, tvec,
                              MARKER_SIZE_MM, 1)
            cv2.putText(origFrame, f"ID: {marker_id}",
                        (int(topLeft[0] + 60), int(topLeft[1] + 60)),
                        cv2.FONT_HERSHEY_DUPLEX, TEXT_SIZE,
                        (0, 255, 0), 2, cv2.LINE_AA)
            if marker_id == ORIGIN_ID:
                tvec_A = tvec[0]
                rvec_A = rvec[0]
            else:
                tvec_B = tvec[0]
                rvec_B = rvec[0]

    # BUG FIX: compute the relative pose only when BOTH markers were found —
    # the original called cv2.Rodrigues(None) and crashed whenever either
    # marker was missing from the frame.
    if rvec_A is not None and rvec_B is not None:
        r_a, _ = cv2.Rodrigues(rvec_A)
        r_b, _ = cv2.Rodrigues(rvec_B)
        # A rotation matrix is orthonormal, so its inverse is its transpose:
        # cheaper and numerically exact compared with np.linalg.inv.
        r_a_inv = r_a.T
        r_ab = r_a_inv @ r_b
        t_ab = r_a_inv @ np.subtract(tvec_B, tvec_A).T
        # Axis-angle (Rodrigues) representation of the relative rotation,
        # converted from radians to degrees.
        r_ab_degrees = np.degrees(cv2.Rodrigues(r_ab)[0])
        print("tvec_A: ", tvec_A[0])
        print("tvec_B: ", tvec_B[0])
        print("tvec_B - tvec_A: ", tvec_B[0] - tvec_A[0])
        print("Translation vector from Marker A to Marker B: \n{} mm".format(t_ab))
        print("Rotation matrix from Marker A to Marker B: \n{}".format(r_ab))
        print("Rotation angles from Marker A to Marker B: \n{} degrees".format(r_ab_degrees))
    return origFrame
Output:
Translation vector from Marker A to Marker B:
[[38.80152078]
[ 0.98232345]
[-3.50059865]] mm
Rotation matrix from Marker A to Marker B:
[[ 0.55048041 0.68722805 0.47401364]
[-0.59382103 0.72141064 -0.35629099]
[-0.58681165 -0.08534806 0.80521289]]
Rotation angles from Marker A to Marker B:
[[ 9.2311289 ]
[ 36.14272143]
[-43.64582976]] degrees