I am trying to read frames from the picamera on a Raspberry Pi 4 and run YOLOv4 object detection on them with cvlib. Here is the code I wrote:
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox

def object_detection_with_bounding_boxes(filename, model="yolov3", confidence=0.2):
    # Read the image into a numpy array
    img = cv2.imread(filename)
    # Perform the object detection
    bbox, label, conf = cv.detect_common_objects(img, confidence=confidence, model=model)
    # Print current image's filename
    print(f"========================\nImage processed: {filename}\n")
    # Print detected objects with confidence level
    for l, c in zip(label, conf):
        print(f"Detected object: {l} with confidence level of {c}\n")
    # Create a new image that includes the bounding boxes
    output_image = draw_bbox(img, bbox, label, conf)
    # Save the image in the directory images_with_boxes
    cv2.imwrite(f'images_with_boxes/{filename}', output_image)
    # Display the image with bounding boxes
    display(Image(f'images_with_boxes/{filename}'))

camera = PiCamera()
camera.resolution = (1280, 720)
camera.framerate = 32
raw_capture = PiRGBArray(camera, size=(1280, 720))
time.sleep(0.1)

for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
    image = frame.array
    img2 = object_detection_with_bounding_boxes(image)
    cv2.imshow("Frame", img2)
    key = cv2.waitKey(1) & 0xFF
    raw_capture.truncate(0)
    if key == ord("q"):
        break
I adapted the code from this tutorial: Object Detection and Bounding Boxes with cvlib – Predictive Hacks. When I run it, I get the error below:
Traceback (most recent call last):
  File "/home/pi/Code_Files/objectdetect/Test2.py", line 44, in <module>
    img2 = object_detection_with_bounding_boxes(image)
  File "/home/pi/Code_Files/objectdetect/Test2.py", line 11, in object_detection_with_bounding_boxes
    img = cv2.imread(filename)
TypeError: Can't convert object of type 'numpy.ndarray' to 'str' for 'filename'
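From the traceback, I think the problem is that object_detection_with_bounding_boxes was written to take a filename string (it calls cv2.imread on its argument), but in my capture loop I pass it the frame as a numpy array. My guess at a fix, which I have not verified yet, is to let the function take the array directly, drop the cv2.imread / cv2.imwrite / display steps (display comes from IPython and there is no filename for a live frame anyway), and return the drawn image so that cv2.imshow has something to show:

import cvlib as cv
from cvlib.object_detection import draw_bbox

def object_detection_with_bounding_boxes(img, model="yolov3", confidence=0.2):
    # img is already a BGR numpy array coming from the camera, so no cv2.imread here
    bbox, label, conf = cv.detect_common_objects(img, confidence=confidence, model=model)
    # Print detected objects with their confidence levels
    for l, c in zip(label, conf):
        print(f"Detected object: {l} with confidence level of {c}")
    # Draw the bounding boxes on the frame and hand it back to the caller
    return draw_bbox(img, bbox, label, conf)

Is this the right way to adapt the tutorial function to live frames?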
For reference, here is the objectdetection.py file from cvlib that detect_common_objects and draw_bbox come from:
import cv2
import os
import numpy as np
from .utils import download_file

initialize = True
net = None
dest_dir = os.path.expanduser('~') + os.path.sep + '.cvlib' + os.path.sep + 'object_detection' + os.path.sep + 'yolo' + os.path.sep + 'yolov3'
classes = None
COLORS = np.random.uniform(0, 255, size=(80, 3))

def populate_class_labels():
    class_file_name = 'yolov3_classes.txt'
    class_file_abs_path = dest_dir + os.path.sep + class_file_name
    url = 'https://github.com/arunponnusamy/object-detection-opencv/raw/master/yolov3.txt'
    if not os.path.exists(class_file_abs_path):
        download_file(url=url, file_name=class_file_name, dest_dir=dest_dir)
    f = open(class_file_abs_path, 'r')
    classes = [line.strip() for line in f.readlines()]
    return classes

def get_output_layers(net):
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    return output_layers
def draw_bbox(img, bbox, labels, confidence, colors=None, write_conf=False):
    """A method to apply a box to the image
    Args:
        img: An image in the form of a numPy array
        bbox: An array of bounding boxes
        labels: An array of labels
        colors: An array of colours the length of the number of targets(80)
        write_conf: An option to write the confidences to the image
    """
    global COLORS
    global classes

    if classes is None:
        classes = populate_class_labels()

    for i, label in enumerate(labels):
        if colors is None:
            color = COLORS[classes.index(label)]
        else:
            color = colors[classes.index(label)]
        if write_conf:
            label += ' ' + str(format(confidence[i] * 100, '.2f')) + '%'
        cv2.rectangle(img, (bbox[i][0],bbox[i][1]), (bbox[i][2],bbox[i][3]), color, 2)
        cv2.putText(img, label, (bbox[i][0],bbox[i][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return img
def detect_common_objects(image, confidence=0.5, nms_thresh=0.3, model='yolov4', enable_gpu=False):
    """A method to detect common objects
    Args:
        image: A colour image in a numpy array
        confidence: A value to filter out objects recognised to a lower confidence score
        nms_thresh: An NMS value
        model: The detection model to be used, supported models are: yolov3, yolov3-tiny, yolov4, yolov4-tiny
        enable_gpu: A boolean to set whether the GPU will be used
    """
    Height, Width = image.shape[:2]
    scale = 0.00392

    global classes
    global dest_dir

    if model == 'yolov3-tiny':
        config_file_name = 'yolov3-tiny.cfg'
        cfg_url = "https://github.com/pjreddie/darknet/raw/master/cfg/yolov3-tiny.cfg"
        weights_file_name = 'yolov3-tiny.weights'
        weights_url = 'https://pjreddie.com/media/files/yolov3-tiny.weights'
        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
    elif model == 'yolov4':
        config_file_name = 'yolov4.cfg'
        cfg_url = 'https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4.cfg'
        weights_file_name = 'yolov4.weights'
        weights_url = 'https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights'
        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
    elif model == 'yolov4-tiny':
        config_file_name = 'yolov4-tiny.cfg'
        cfg_url = 'https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg'
        weights_file_name = 'yolov4-tiny.weights'
        weights_url = 'https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights'
        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
    else:
        config_file_name = 'yolov3.cfg'
        cfg_url = 'https://github.com/arunponnusamy/object-detection-opencv/raw/master/yolov3.cfg'
        weights_file_name = 'yolov3.weights'
        weights_url = 'https://pjreddie.com/media/files/yolov3.weights'
        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)

    config_file_abs_path = dest_dir + os.path.sep + config_file_name
    weights_file_abs_path = dest_dir + os.path.sep + weights_file_name

    if not os.path.exists(config_file_abs_path):
        download_file(url=cfg_url, file_name=config_file_name, dest_dir=dest_dir)

    if not os.path.exists(weights_file_abs_path):
        download_file(url=weights_url, file_name=weights_file_name, dest_dir=dest_dir)

    global initialize
    global net

    if initialize:
        classes = populate_class_labels()
        net = cv2.dnn.readNet(weights_file_abs_path, config_file_abs_path)
        initialize = False

    # enables opencv dnn module to use CUDA on Nvidia card instead of cpu
    if enable_gpu:
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

    net.setInput(blob)

    outs = net.forward(get_output_layers(net))

    class_ids = []
    confidences = []
    boxes = []

    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            max_conf = scores[class_id]
            if max_conf > confidence:
                center_x = int(detection[0] * Width)
                center_y = int(detection[1] * Height)
                w = int(detection[2] * Width)
                h = int(detection[3] * Height)
                x = center_x - (w / 2)
                y = center_y - (h / 2)
                class_ids.append(class_id)
                confidences.append(float(max_conf))
                boxes.append([x, y, w, h])

    indices = cv2.dnn.NMSBoxes(boxes, confidences, confidence, nms_thresh)

    bbox = []
    label = []
    conf = []

    for i in indices:
        i = i[0]
        box = boxes[i]
        x = box[0]
        y = box[1]
        w = box[2]
        h = box[3]
        bbox.append([int(x), int(y), int(x+w), int(y+h)])
        label.append(str(classes[class_ids[i]]))
        conf.append(confidences[i])

    return bbox, label, conf
class YOLO:

    def __init__(self, weights, config, labels, version='yolov3'):
        print('[INFO] Initializing YOLO ..')
        self.config = config
        self.weights = weights
        self.version = version

        with open(labels, 'r') as f:
            self.labels = [line.strip() for line in f.readlines()]

        self.colors = np.random.uniform(0, 255, size=(len(self.labels), 3))

        self.net = cv2.dnn.readNet(self.weights, self.config)
        layer_names = self.net.getLayerNames()
        self.output_layers = [layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]

    def detect_objects(self, image, confidence=0.5, nms_thresh=0.3, enable_gpu=False):
        if enable_gpu:
            net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)

        Height, Width = image.shape[:2]
        scale = 0.00392

        blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)

        self.net.setInput(blob)
        outs = self.net.forward(self.output_layers)

        class_ids = []
        confidences = []
        boxes = []

        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                max_conf = scores[class_id]
                if max_conf > confidence:
                    center_x = int(detection[0] * Width)
                    center_y = int(detection[1] * Height)
                    w = int(detection[2] * Width)
                    h = int(detection[3] * Height)
                    x = center_x - (w / 2)
                    y = center_y - (h / 2)
                    class_ids.append(class_id)
                    confidences.append(float(max_conf))
                    boxes.append([x, y, w, h])

        indices = cv2.dnn.NMSBoxes(boxes, confidences, confidence, nms_thresh)

        bbox = []
        label = []
        conf = []

        for i in indices:
            i = i[0]
            box = boxes[i]
            x = box[0]
            y = box[1]
            w = box[2]
            h = box[3]
            bbox.append([int(x), int(y), int(x+w), int(y+h)])
            label.append(str(self.labels[class_ids[i]]))
            conf.append(confidences[i])

        return bbox, label, conf

    def draw_bbox(self, img, bbox, labels, confidence, colors=None, write_conf=False):
        if colors is None:
            colors = self.colors

        for i, label in enumerate(labels):
            color = colors[self.labels.index(label)]
            if write_conf:
                label += ' ' + str(format(confidence[i] * 100, '.2f')) + '%'
            cv2.rectangle(img, (bbox[i][0],bbox[i][1]), (bbox[i][2],bbox[i][3]), color, 2)
            cv2.putText(img, label, (bbox[i][0],bbox[i][1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
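And this is how I think the capture loop in my own script would then look with the modified function above (again just a sketch, I have not been able to test it on the Pi yet):

from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

camera = PiCamera()
camera.resolution = (1280, 720)
camera.framerate = 32
raw_capture = PiRGBArray(camera, size=(1280, 720))
time.sleep(0.1)  # give the camera a moment to warm up

for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
    image = frame.array
    # the modified function returns the frame with boxes drawn, so imshow gets a valid image
    img2 = object_detection_with_bounding_boxes(image)
    cv2.imshow("Frame", img2)
    key = cv2.waitKey(1) & 0xFF
    # clear the stream so the next frame can be captured
    raw_capture.truncate(0)
    if key == ord("q"):
        break

cv2.destroyAllWindows()  # my addition: close the preview window when the loop ends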
I am not sure whether my guesses above are correct, and I am also unsure about the rest of the code. Can you help me with it?