Hello, I am using the Jetson Nano Developer Kit with an imx219-77 camera. I want to process the camera's video stream, produced by a GStreamer pipeline, with OpenCV in order to build an object detection/tracking application. The pipeline runs and every frame is handed to the OpenCV side through an appsink callback. However, when I process and display these frames with OpenCV, I only ever see a static black image. In addition, the key I assign to cv::waitKey() is never detected: for example, when I check for 'q', pressing 'q' produces no response at all. I am sure the camera itself works, as I have tested it in several different ways; for example, a standalone preview pipeline along the lines of the sketch below, which bypasses OpenCV entirely, shows live video.
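```
#include <gst/gst.h>

// Standalone camera check: render nvarguscamerasrc straight to the screen so
// OpenCV is out of the picture. The display sink chosen here
// (nvegltransform ! nveglglessink) is just one of the options on the Nano.
int main(int argc, char *argv[]) {
    gst_init(&argc, &argv);
    GError *err = nullptr;
    GstElement *pipeline = gst_parse_launch(
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=30/1 ! "
        "nvvidconv ! nvegltransform ! nveglglessink",
        &err);
    if (!pipeline) {
        g_printerr("Parse error: %s\n", err->message);
        g_error_free(err);
        return -1;
    }
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    // Block until an error or end-of-stream, then tear down.
    GstBus *bus = gst_element_get_bus(pipeline);
    GstMessage *msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
        (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (msg) gst_message_unref(msg);
    gst_object_unref(bus);
    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(pipeline);
    return 0;
}
```
My actual application is below. Thanks in advance for your assistance.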
```
#include <gst/gst.h>
#include <gst/app/gstappsink.h>
#include <opencv2/opencv.hpp>
#include <thread>
#include <mutex>
#include <X11/Xlib.h>
#include <opencv2/highgui/highgui.hpp>
GMainLoop *loop = nullptr;
static int frame_counter = 0;
static int frameCounter = 0;
cv::Mat next_frame = cv::Mat::zeros(720, 1280, CV_8UC3); // rows = height, cols = width
std::mutex frameMutex;
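// Bus watch: quit the GLib main loop on end-of-stream or on a pipeline error.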
static gboolean bus_call(GstBus *bus, GstMessage *msg, gpointer data) {
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS:
g_print("End of stream\n");
g_main_loop_quit(loop);
break;
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free(debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(loop);
break;
}
default:
break;
}
return TRUE;
}
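// appsink "new-sample" callback: pull the sample, wrap the mapped buffer as a
// cv::Mat, and copy it into the shared next_frame under the mutex.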
static GstFlowReturn on_new_sample(GstElement *sink, gpointer user_data) {
frame_counter++; // Increment the counter on every callback
GstSample *sample = gst_app_sink_pull_sample(GST_APP_SINK(sink));
if (sample) {
GstCaps *caps = gst_sample_get_caps(sample);
GstStructure *structure = gst_caps_get_structure(caps, 0);
gint width, height;
gst_structure_get_int(structure, "width", &width);
gst_structure_get_int(structure, "height", &height);
GstBuffer *buffer = gst_sample_get_buffer(sample);
GstMapInfo map;
gst_buffer_map(buffer, &map, GST_MAP_READ);
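// map.data points into GStreamer-owned memory and is only valid until
// gst_buffer_unmap(), hence the copyTo() into next_frame below.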
cv::Mat current_frame = cv::Mat(height, width, CV_8UC3, map.data);
//cv::cvtColor(cv::Mat(height + height / 2, width, CV_8UC1, map.data), current_frame, cv::COLOR_YUV2BGR_NV12);
std::cout << "frame1 rows: " << current_frame.rows << ", cols: " << current_frame.cols << std::endl;
std::cout << "map.data: " << static_cast<void*>(map.data) << std::endl;
if (!current_frame.empty()) {
// Copy straight into the shared frame instead of cloning
frameMutex.lock();
current_frame.copyTo(next_frame);
frameMutex.unlock();
std::cout << "frame2 rows: " << next_frame.rows << ", cols: " << next_frame.cols << std::endl;
if (next_frame.data == nullptr) {
std::cerr << "Error: next_frame.data is nullptr after the copy." << std::endl;
}
g_print("Frame Received - Total Frames: %d\n", frame_counter);
//cv::imwrite("Opencv.jpg", current_frame);
//cv::imshow("opencv",current_frame);
//current_frame.release();
}
gst_buffer_unmap(buffer, &map);
gst_sample_unref(sample);
}
return GST_FLOW_OK;
}
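// Display thread: snapshots the shared frame under the mutex and shows it with cv::imshow.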
void displayFrame() {
cv::Mat readyFrame = cv::Mat::zeros(720, 1280, CV_8UC3); // rows = height, cols = width
//cv::namedWindow("opencv", cv::WINDOW_AUTOSIZE);
while (true) {
if (!next_frame.empty()) {
g_print("Here: \n");
frameMutex.lock();
next_frame.copyTo(readyFrame);
frameMutex.unlock();
g_print("Here-1: \n");
//std::string fileName = "resimler/frame_" + std::to_string(frameCounter) + ".jpg";
//cv::imwrite(fileName, readyFrame);
frameCounter++;
cv::imshow("opencv", readyFrame);
char c = (char)cv::waitKey();
if (c == 27 || c == 'q')
{
g_print("Exiting...\n");
break;
}
g_print("Here-2: \n");
}
}
}
int main(int argc, char *argv[]) {
// X11 is used from the display thread (cv::imshow), so enable X11 threading first
XInitThreads();
g_main_context_push_thread_default(nullptr);
// Initialize GStreamer
gst_init(&argc, &argv);
// Create elements
GstElement *pipeline = gst_pipeline_new("mypipeline");
GstElement *source = gst_element_factory_make("nvarguscamerasrc", "src");
GstElement *capsfilter1 = gst_element_factory_make("capsfilter", "filter1");
GstElement *nvvidconv1 = gst_element_factory_make("nvvidconv", "nvvidconv1");
//GstElement *queue = gst_element_factory_make("queue", "queue");
GstElement *videoconvert = gst_element_factory_make("videoconvert", "videoconvert");
GstElement *appsink = gst_element_factory_make("appsink", "appsink");
// Check for element creation errors
if (!pipeline || !source || !capsfilter1 || !nvvidconv1 || !videoconvert || !appsink) {
g_printerr("Not all elements could be created. Exiting.\n");
return -1;
}
// Set element properties
g_object_set(G_OBJECT(capsfilter1),
"caps",
gst_caps_from_string("video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=30/1"),
nullptr);
g_object_set(G_OBJECT(videoconvert), "caps", gst_caps_from_string("video/x-raw, format=BGR"), nullptr);
//g_object_set(G_OBJECT(queue), "max-size-time", 2000000000, "leaky", 2, "max-size-bytes",62914560 , "max-rate",1, nullptr);
// Configure appsink for efficient data retrieval
g_object_set(appsink, "emit-signals", TRUE, "sync", FALSE, "max-buffers", 30, "drop", FALSE, nullptr); //"samplesperbuffer",1024, "num-buffers",-1,"blocksize",-1, "wait-on-eos",false,
g_signal_connect(appsink, "new-sample", G_CALLBACK(on_new_sample), nullptr);
// Add elements to pipeline and link them
gst_bin_add_many(GST_BIN(pipeline), source, capsfilter1, nvvidconv1, videoconvert, appsink, nullptr);
gst_element_link_many(source, capsfilter1, nvvidconv1, videoconvert, appsink, nullptr);
// Add bus watch to listen for events
GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
gst_bus_add_watch(bus, (GstBusFunc)bus_call, loop);
gst_object_unref(bus);
// Set pipeline to PLAYING state
GstStateChangeReturn ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr("Unable to set the pipeline to the playing state. Exiting.\n");
gst_object_unref(pipeline);
return -1;
}
std::thread displayThread(displayFrame);
// Create GMainLoop for handling GStreamer events
loop = g_main_loop_new(nullptr, FALSE);
g_main_loop_run(loop);
displayThread.join();
// Clean up
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
g_main_loop_unref(loop);
return 0;
}
```
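One more detail I am not sure about: I set a "caps" property on videoconvert, but I could not find such a property documented for that element, so that g_object_set call may be silently ignored. If it is, the appsink is probably not receiving BGR at all, even though I interpret map.data as CV_8UC3. A variant I have been considering (an untested sketch; appsink is the same element created in main()) pins the caps on the appsink itself instead:

```
#include <gst/app/gstappsink.h>

// Ask the appsink to accept only BGR frames in system memory, so upstream
// negotiation is forced to deliver exactly what the CV_8UC3 cv::Mat wrapper
// assumes. Meant to replace the g_object_set(videoconvert, "caps", ...) call.
GstCaps *bgr_caps = gst_caps_from_string("video/x-raw, format=BGR");
gst_app_sink_set_caps(GST_APP_SINK(appsink), bgr_caps);
gst_caps_unref(bgr_caps);
```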