Hi, I am trying to do face detection using OpenCV on my Toradex Verdin board.
The OpenCV face-detection sample code is provided by Toradex itself; I modified it a little for my board.
The camera streams and works perfectly through GStreamer:
gst-launch-1.0 v4l2src device=/dev/video0 ! 'video/x-raw,format=NV16,width=800,height=600,framerate=60/1' ! videoconvert ! queue ! fbdevsink sync=false
While running my program I get the errors below — can you guide me where I am wrong?
root@verdin-imx8mp-15460278:~# ./test-cpp
[ WARN:0@0.449] global /usr/src/debug/opencv/4.6.0.imx/modules/videoio/src/cap_gstreamer.cpp (1405) open OpenCV | GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
[ WARN:0@0.616] global /usr/src/debug/opencv/4.6.0.imx/modules/videoio/src/cap_v4l.cpp (902) open VIDEOIO(V4L2:/dev/video0): can’t open camera by index
ERROR: Could not access the camera!
Failed to open camera with ID 0
#include <stdio.h>

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/videoio.hpp>
using namespace std;
using namespace cv;

/* Haar cascade shared by main(); loaded once at startup. */
CascadeClassifier face_cascade;
/* Smart quotes from the forum paste replaced with plain double quotes. */
string window_name = "Face Detection Demo";
String face_cascade_name = "/root/haarcascade_frontalface_alt2.xml";
const int BORDER = 8; /* border between GUI elements to the edge of the image */
/*
 * Convert any value with a stream-insertion operator (operator<<)
 * to its text representation.
 *
 * The original paste had lost the template parameter list
 * ("template string toString(T t)"), which does not compile;
 * "template <typename T>" is restored here.  Types are std::-qualified
 * so the helper no longer depends on a file-level using-directive.
 */
template <typename T>
std::string toString(T t)
{
    std::ostringstream out;
    out << t;
    return out.str();
}
/*
 * Draw text into an image. Defaults to top-left-justified text,
 * but you can give negative x coords for right-justified text,
 * and/or negative y coords for bottom-justified text.
 * Returns the bounding rect around the drawn text.
 *
 * Note: the pasted version had both comment delimiters garbled
 * ("/" where "*" + "/" belong), which swallowed the function
 * signature and broke compilation — fixed here.
 */
Rect drawString(Mat img, string text, Point coord, Scalar color,
                float fontScale = 0.6f, int thickness = 1, int fontFace = FONT_HERSHEY_COMPLEX)
{
    /* Get the text size & baseline */
    int baseline = 0;
    Size textSize = getTextSize(text, fontFace, fontScale, thickness, &baseline);
    baseline += thickness;

    /* Adjust the coords for left/right-justified or top/bottom-justified */
    if (coord.y >= 0) {
        /*
         * Coordinates are for the top-left corner of the text
         * from the top-left of the image, so move down by one row.
         */
        coord.y += textSize.height;
    } else {
        /*
         * Coordinates are for the bottom-left corner of the text
         * from the bottom-left of the image, so come up from the bottom.
         */
        coord.y += img.rows - baseline + 1;
    }
    /* Become right-justified if desired */
    if (coord.x < 0) {
        coord.x += img.cols - textSize.width + 1;
    }
    /* Get the bounding box around the text */
    Rect boundingRect = Rect(coord.x, coord.y - textSize.height,
                             textSize.width, baseline + textSize.height);
    /* Draw anti-aliased text (LINE_AA replaces the legacy CV_AA constant) */
    putText(img, text, coord, fontFace, fontScale, color, thickness, cv::LINE_AA);
    /* Let the user know how big their text is, in case they want to arrange things */
    return boundingRect;
}
int main(int argc, const char** argv)
{
VideoCapture capture;
Mat frame;
std::vector faces;
Mat frame_gray;
// int deviceID = 0; // 0 = open default camera
// int apiID = cv::CAP_V4L2; // 0 = autodetect default API
if (!face_cascade.load( face_cascade_name ) ) {
printf("--(!)Error loading training file: haarcascade_frontalface_alt2.xml\n");
return -1;
};
try {
std::string pipeline =
"v4l2src device=/dev/video0 io-mode=mmap ! "
"video/x-raw,format=NV16,width=800,height=600,framerate=30/1 ! "
"videoconvert ! video/x-raw,format=BGR ! appsink";
capture.open(pipeline, cv::CAP_GSTREAMER);
// capture.open(deviceID, apiID);
// capture.open("v4l2:///dev/video0");
// capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
// capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
capture.set(cv::CAP_PROP_FRAME_WIDTH, 800);
capture.set(cv::CAP_PROP_FRAME_HEIGHT, 600);
}
catch (cv::Exception &e)
{
const char *err_msg = e.what();
cout << "Exception caught: " << err_msg << endl;
}
if ( !capture.isOpened() ) {
cout << "ERROR: Could not access the camera!" << endl;
std::cerr << "Failed to open camera with ID " << deviceID << std::endl;
exit(1);
}
while(true) {
capture >> frame;
if (!frame.empty()) {
cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
face_cascade.detectMultiScale(frame_gray,faces,1.2,3,0,cv::Size(80, 80));
for (size_t i = 0; i < faces.size(); i++) {
cv::Point pt1(faces[i].x, faces[i].y);
cv::Point pt2(faces[i].x + faces[i].width,
faces[i].y + faces[i].height);
cv::rectangle(frame, pt1, pt2,cv::Scalar(0, 255, 0),3);
Mat faceROI = frame_gray(faces[i]);
}
string stringToDisplay = "Number Of Faces: " + toString(faces.size());
drawString(frame, stringToDisplay, Point(BORDER, -BORDER - 2 - 50), CV_RGB(0, 0, 0));
drawString(frame, stringToDisplay, Point(BORDER + 1, -BORDER - 1 - 50), CV_RGB(0, 255, 0));
imshow(window_name, frame);
} else {
printf(" --(!) No captured frame");
}
int c = waitKey(1);
if ((char)c == 27) {
break;
}
}
return 0;
}