This question is a mix of OpenCV and Raspberry Pi. I’ll try OpenCV first…
I have a Raspberry Pi 4 with Raspberry Pi OS 10, and a Pi Camera connected to the MIPI port.
I use a slightly modified version of this script by Tiziano Fiorenzani to perform precision landing with a drone. This usually works well, but every now and then, this error occurs:
2023-08-03 14:27:15,883 - root - ERROR - Traceback (most recent call last):
  File "/home/pi/mission/cpa/drones/px4_drone.py", line 402, in handleAction_PreciseLanding2
    await self.precisionLanding.land(action)
  File "/home/pi/mission/cpa/drones/precisionlanding/precisionLanding_pid.py", line 196, in land
    future = await loop.run_in_executor(executor, aruco_tracker.track)
  File "/usr/lib/python3.7/concurrent/futures/thread.py", line 57, in run
    result = self.fn(*self.args, **self.kwargs)
  File "/home/pi/mission/cpa/drones/lib_aruco_pose_picam.py", line 176, in track
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red
cv2.error: OpenCV(4.5.4) /tmp/pip-wheel-4s1eeqwm/opencv-contrib-python_94a16b66d4594a43bcabaeb968fd51a1/opencv/modules/imgproc/src/color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cvtColor'
It occurs very seldom (maybe once every 30 flights), and when it does, it happens on the very first frame. Restarting the software fixes the problem for the next 30+ flights, but it remains a lingering danger.
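The assertion !_src.empty() tells me that cvtColor received an empty image, i.e. self._cap.read() must have returned ret == False and frame is None on that very first read. Inside track(), I could guard the read with a retry loop like this (just a sketch of a workaround under that assumption, not an explanation of the root cause):

#-- Sketch, not in the original script: retry the first read a few times
#-- while the camera pipeline warms up, instead of passing None to cvtColor
ret, frame = self._cap.read()
retries = 0
while (not ret or frame is None) and retries < 10:
    time.sleep(0.05)
    ret, frame = self._cap.read()
    retries += 1
if frame is None:
    raise RuntimeError("camera returned no frame after 10 retries")

But I would rather understand why the very first read can fail at all.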
Here is my script. I mainly added the capability to recognize a second, bigger marker in addition to the smaller one, so the drone can switch between the two patterns as it approaches.
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
class ArucoSingleTracker():
    def __init__(self,
                 inner_id,
                 outer_id,
                 inner_marker_size,
                 outer_marker_size,
                 camera_matrix,
                 camera_distortion,
                 camera_size=[800,600],
                 show_video=False,
                 save_images=False
                 ):
        self.inner_id = inner_id
        self.outer_id = outer_id
        self.inner_marker_size = inner_marker_size
        self.outer_marker_size = outer_marker_size
        self.save_images = save_images
        self._show_video = show_video
        self._camera_matrix = camera_matrix
        self._camera_distortion = camera_distortion
        self.is_detected = False
        self._kill = False
        #--- 180 deg rotation matrix around the x axis
        self._R_flip = np.zeros((3,3), dtype=np.float32)
        self._R_flip[0,0] = 1.0
        self._R_flip[1,1] = -1.0
        self._R_flip[2,2] = -1.0
        #--- Define the aruco dictionary
        self._aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_5X5_50)
        self._parameters = aruco.DetectorParameters_create()
        #--- Capture the videocamera (this may also be a video or a picture)
        self._cap = cv2.VideoCapture("/dev/landingcam")
        #-- Set the camera size as the one it was calibrated with
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_size[0])
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_size[1])
        #-- Font for the text in the image
        self.font = cv2.FONT_HERSHEY_PLAIN
        self._t_read = time.time()
        self._t_detect = self._t_read
        self.fps_read = 0.0
        self.fps_detect = 0.0
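        #-- (Sketch, not in the original script: since cap.read() simply
        #-- returns (False, None) when the device never opened, one check
        #-- I could add here is:)
        #if not self._cap.isOpened():
        #    raise RuntimeError("could not open /dev/landingcam")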
    def _rotationMatrixToEulerAngles(self, R):
        # Calculates rotation matrix to euler angles.
        # The result is the same as MATLAB except the order
        # of the euler angles (x and z are swapped).
        def isRotationMatrix(R):
            Rt = np.transpose(R)
            shouldBeIdentity = np.dot(Rt, R)
            I = np.identity(3, dtype=R.dtype)
            n = np.linalg.norm(I - shouldBeIdentity)
            return n < 1e-6
        assert (isRotationMatrix(R))
        sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
        singular = sy < 1e-6
        if not singular:
            x = math.atan2(R[2, 1], R[2, 2])
            y = math.atan2(-R[2, 0], sy)
            z = math.atan2(R[1, 0], R[0, 0])
        else:
            x = math.atan2(-R[1, 2], R[1, 1])
            y = math.atan2(-R[2, 0], sy)
            z = 0
        return np.array([x, y, z])
    def _update_fps_read(self):
        t = time.time()
        self.fps_read = 1.0/(t - self._t_read)
        self._t_read = t

    def _update_fps_detect(self):
        t = time.time()
        self.fps_detect = 1.0/(t - self._t_detect)
        self._t_detect = t

    def stop(self):
        self._kill = True

    def camera_to_uav(self, x_cam, y_cam):
        x_uav = -y_cam
        y_uav = x_cam
        return (x_uav, y_uav)

    def marker_position_to_angle(self, x, y, z):
        angle_x = math.atan2(x, z)
        angle_y = math.atan2(y, z)
        return (angle_x, angle_y)
    def track(self, loop=False, verbose=False, show_video=None):
        self._kill = False
        if show_video is None: show_video = self._show_video
        yaw_marker = -999
        marker_found = False
        x = y = z = 0
        while not self._kill:
            #-- Read the camera frame
            ret, frame = self._cap.read()
            self._update_fps_read()
            #-- Convert to gray scale
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red
            #-- Find all the aruco markers in the image
            corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=self._aruco_dict,
                                                         parameters=self._parameters,
                                                         cameraMatrix=self._camera_matrix,
                                                         distCoeff=self._camera_distortion)
            id_to_estimate = None
            marker_size_to_estimate = None
            if ids is not None and self.inner_id in ids:
                id_to_estimate = self.inner_id
                marker_size_to_estimate = self.inner_marker_size
            elif ids is not None and self.outer_id in ids:
                id_to_estimate = self.outer_id
                marker_size_to_estimate = self.outer_marker_size
            if id_to_estimate is not None:
                marker_found = True
                self._update_fps_detect()
                #-- Find the index of the marker we want to estimate
                index = 0
                for marker_id in np.nditer(ids):
                    if marker_id == id_to_estimate:
                        break
                    index = index + 1
                corners_to_estimate = (corners[index],)
                ret = aruco.estimatePoseSingleMarkers(corners_to_estimate, marker_size_to_estimate, self._camera_matrix, self._camera_distortion)
                #-- Unpack the output, get only the first
                rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]
                x = tvec[0]
                y = tvec[1]
                z = tvec[2]
                #-- Draw the detected marker and put a reference frame over it
                aruco.drawDetectedMarkers(frame, corners_to_estimate)
                aruco.drawAxis(frame, self._camera_matrix, self._camera_distortion, rvec, tvec, 10)
                #-- Obtain the rotation matrix tag->camera
                R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                R_tc = R_ct.T
                #-- Get the attitude in terms of euler 321 (needs to be flipped first)
                roll_marker, pitch_marker, yaw_marker = self._rotationMatrixToEulerAngles(self._R_flip*R_tc)
                #-- Now get position and attitude of the camera with respect to the marker
                pos_camera = -R_tc*np.matrix(tvec).T
                if verbose: print("Marker X = %.1f Y = %.1f Z = %.1f - fps = %.0f"%(tvec[0], tvec[1], tvec[2], self.fps_detect))
                if show_video:
                    #-- Print the tag position in camera frame
                    str_position = "MARKER Position x=%4.0f y=%4.0f z=%4.0f"%(tvec[0], tvec[1], tvec[2])
                    cv2.putText(frame, str_position, (0, 100), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    #-- Print the marker's attitude with respect to the camera frame
                    str_attitude = "MARKER Attitude r=%4.0f p=%4.0f y=%4.0f"%(math.degrees(roll_marker), math.degrees(pitch_marker),
                                                                              math.degrees(yaw_marker))
                    cv2.putText(frame, str_attitude, (0, 150), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    str_position = "CAMERA Position x=%4.0f y=%4.0f z=%4.0f"%(pos_camera[0], pos_camera[1], pos_camera[2])
                    cv2.putText(frame, str_position, (0, 200), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    #-- Get the attitude of the camera with respect to the frame
                    roll_camera, pitch_camera, yaw_camera = self._rotationMatrixToEulerAngles(self._R_flip*R_tc)
                    str_attitude = "CAMERA Attitude r=%4.0f p=%4.0f y=%4.0f"%(math.degrees(roll_camera), math.degrees(pitch_camera),
                                                                              math.degrees(yaw_camera))
                    cv2.putText(frame, str_attitude, (0, 250), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            else:
                if verbose: print("Nothing detected - fps = %.0f"%self.fps_read)
                yaw_marker = -999
            if show_video:
                #--- Display the frame
                cv2.imshow('frame', frame)
                #--- use 'q' to quit
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    self._cap.release()
                    cv2.destroyAllWindows()
                    break
            if not loop: return (marker_found, x, y, z, yaw_marker, frame)
    def track2(self, vehicle=None, loop=True, verbose=False, show_video=None, save_images=None, imgLogFolder=None, imgName=None):
        self._kill = False
        if show_video is None: show_video = self._show_video
        if save_images is None: save_images = self.save_images
        marker_found = False
        x = y = z = 0
        while not self._kill:
            #-- Read the camera frame
            ret, frame = self._cap.read()
            angle_x, angle_y = 0, 0
            if vehicle is not None:
                angle_x = vehicle.attitude.roll
                angle_y = vehicle.attitude.pitch
            self._update_fps_read()
            #-- Convert to gray scale
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #-- remember, OpenCV stores color images in Blue, Green, Red
            #-- Find all the aruco markers in the image
            corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=self._aruco_dict,
                                                         parameters=self._parameters,
                                                         cameraMatrix=self._camera_matrix,
                                                         distCoeff=self._camera_distortion)
            if ids is not None and (self.inner_id in ids or self.outer_id in ids):
                marker_found = True
                self._update_fps_detect()
                #-- ret = [rvec, tvec, ?]
                #-- array of rotation and position of each marker in camera frame
                #-- rvec = [[rvec_1], [rvec_2], ...] attitude of the marker with respect to camera frame
                #-- tvec = [[tvec_1], [tvec_2], ...] position of the marker in camera frame
                #-- Use the inner marker only when it is the sole marker in view,
                #-- otherwise estimate the pose of the outer marker
                marker_index = 0
                if self.inner_id in ids and self.outer_id not in ids:
                    marker_index = np.where(ids == self.inner_id)[0][0]
                    ret = aruco.estimatePoseSingleMarkers(np.array(corners[marker_index]), self.inner_marker_size, self._camera_matrix, self._camera_distortion)
                elif self.inner_id in ids and self.outer_id in ids:
                    marker_index = np.where(ids == self.outer_id)[0][0]
                    ret = aruco.estimatePoseSingleMarkers(np.array(corners[marker_index]), self.outer_marker_size, self._camera_matrix, self._camera_distortion)
                else:
                    marker_index = np.where(ids == self.outer_id)[0][0]
                    ret = aruco.estimatePoseSingleMarkers(np.array(corners[marker_index]), self.outer_marker_size, self._camera_matrix, self._camera_distortion)
                #-- Unpack the output, get only the first
                rvec, tvec = ret[0][0,0,:], ret[1][0,0,:]
                #-- rvec marker: z = roll angle (clockwise, starts at 0), y = yaw angle (clockwise, starts at 0), x = pitch angle (clockwise, starts at pi)
                #-- rvec drone: y = pitch angle (counter-clockwise, starts at 0), x = roll angle (clockwise, starts at 0)
                rvec = np.array([angle_x, angle_y, rvec[0]])
                x = tvec[0]  #-- drone left +
                y = tvec[1]  #-- drone front +
                z = tvec[2]  #-- drone up +
                #-- Draw the detected marker and put a reference frame over it
                aruco.drawDetectedMarkers(frame, corners[marker_index:marker_index+1], ids=ids[marker_index:marker_index+1])
                aruco.drawAxis(frame, self._camera_matrix, self._camera_distortion, rvec, tvec, 10)
                #-- Obtain the rotation matrix tag->camera
                R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
                R_tc = R_ct.T
                #-- Get the attitude in terms of euler 321 (needs to be flipped first)
                roll_marker, pitch_marker, yaw_marker = self._rotationMatrixToEulerAngles(self._R_flip*R_tc)
                #-- Now get position and attitude of the camera with respect to the marker
                pos_camera = -R_tc*np.matrix(tvec).T
                if verbose: print("Marker X = %.4f Y = %.4f Z = %.4f - fps = %.0f"%(tvec[0], tvec[1], tvec[2], self.fps_detect))
                if show_video:
                    font = cv2.FONT_HERSHEY_PLAIN
                    #-- Print the tag position in camera frame
                    str_position = "MARKER Position x=%4.4f y=%4.4f z=%4.4f"%(tvec[0], tvec[1], tvec[2])
                    cv2.putText(frame, str_position, (0, 100), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
                    #-- Print the marker's attitude with respect to the camera frame
                    str_attitude = "MARKER Attitude r=%4.0f p=%4.0f y=%4.0f"%(math.degrees(roll_marker), math.degrees(pitch_marker),
                                                                              math.degrees(yaw_marker))
                    cv2.putText(frame, str_attitude, (0, 150), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
                    str_position = "CAMERA Position x=%4.0f y=%4.0f z=%4.0f"%(pos_camera[0], pos_camera[1], pos_camera[2])
                    cv2.putText(frame, str_position, (0, 200), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                    x_cm, y_cm = self.camera_to_uav(tvec[0], tvec[1])
                    angle_x, angle_y = self.marker_position_to_angle(x_cm, y_cm, tvec[2])
                    str_position = "CAMERA Angle x=%4f y=%4f"%(angle_x, angle_y)
                    cv2.putText(frame, str_position, (0, 250), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
            else:
                if verbose: print("Nothing detected - fps = %.0f"%self.fps_read)
            if show_video:
                #--- Display the frame
                cv2.imshow('frame', frame)
                #--- use 'q' to quit
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    self._cap.release()
                    cv2.destroyAllWindows()
                    break
            if save_images:
                imgPath = imgLogFolder + '/' + imgName + '.jpg'
                cv2.imwrite(imgPath, frame)
            if not loop: return (marker_found, x, y, z, frame)
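For completeness, this is roughly how the tracker is driven from the landing coroutine, matching the traceback above (the marker ids and the calibration values here are placeholders, not my real ones):

import asyncio
import numpy as np
from concurrent.futures import ThreadPoolExecutor

#-- Placeholder calibration; the real values come from my calibration files
camera_matrix = np.array([[600.0, 0.0, 400.0],
                          [0.0, 600.0, 300.0],
                          [0.0, 0.0, 1.0]])
camera_distortion = np.zeros((5, 1))

aruco_tracker = ArucoSingleTracker(inner_id=1, outer_id=2,
                                   inner_marker_size=10, outer_marker_size=50,
                                   camera_matrix=camera_matrix,
                                   camera_distortion=camera_distortion)
executor = ThreadPoolExecutor(max_workers=1)

async def land():
    loop = asyncio.get_event_loop()
    while True:
        #-- track(loop=False) reads one frame and returns; this is the call
        #-- that raises the cvtColor error when the very first frame is empty
        marker_found, x, y, z, yaw_marker, frame = await loop.run_in_executor(
            executor, aruco_tracker.track)
        if marker_found:
            pass  #-- feed (x, y, z, yaw_marker) into the landing PID here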