How do I convert each frame obtained from cv2.VideoCapture(0) into an RTSP output?
Hi @frost,
Hmm, maybe you can try using gstreamer appsrc for that?
You can define a gst pipeline that receives your buffers and pushes them out, something like this (FYI I didn't test it, it's just pseudocode):
pipeline_str = "appsrc ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=ultrafast ! rtph264pay config-interval=1 pt=96 ! rtspclientsink location=rtsp://127.0.0.1:8554/test"
# Create the GStreamer pipeline
pipeline = Gst.parse_launch(pipeline_str)
appsrc = pipeline.get_by_name("appsrc")
pipeline.set_state(Gst.State.PLAYING)
Then when you have a frame:
# Convert frame for gst
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
data = frame_rgb.tobytes()
buf = Gst.Buffer.new_allocate(None, len(data), None)
buf.fill(0, data)
# Push the buffer to the appsrc element
appsrc.emit("push-buffer", buf)
Regards,
Bob
support@proventusnova.com
Thank you for your reply. I tried to build the following code based on the idea above:
import cv2
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject, GLib

cam_rtsp = 'rtsp://admin:admin123@192.168.1.108:554/cam/realmonitor?channel=1&subtype=0'

class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, **properties):
        super(SensorFactory, self).__init__(**properties)
        self.cap = cv2.VideoCapture(cam_rtsp)
        self.number_frames = 0
        self.fps = 25
        self.duration = 1 / self.fps * Gst.SECOND  # duration of a frame in nanoseconds
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.launch_string = (
            'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME '
            f'caps=video/x-raw,format=BGR,width={self.width},height={self.height},framerate={self.fps}/1 '
            f'! autovideoconvert ! video/x-raw,format=I420,width={self.width},height={self.height},framerate={self.fps}/1 '
            '! x264enc speed-preset=ultrafast tune=zerolatency bitrate=500 '
            '! rtph264pay config-interval=1 name=pay0 pt=96'
        )

    def on_need_data(self, src, length):
        try:
            if self.cap.isOpened():
                ret, frame = self.cap.read()
                if ret:
                    # Resize the frame to the size advertised in the appsrc caps
                    frame = cv2.resize(frame, (self.width, self.height))
                    data = frame.tobytes()
                    buf = Gst.Buffer.new_allocate(None, len(data), None)
                    buf.fill(0, data)
                    buf.duration = self.duration
                    timestamp = self.number_frames * self.duration
                    buf.pts = buf.dts = int(timestamp)
                    buf.offset = timestamp
                    self.number_frames += 1
                    retval = src.emit('push-buffer', buf)
                    print('pushed buffer, frame {}, timestamp {}, duration {} s'.format(
                        self.number_frames, timestamp, self.duration / Gst.SECOND))
                    if retval != Gst.FlowReturn.OK:
                        print(retval)
        except StopIteration:
            # self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            pass

    def do_create_element(self, url):
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.number_frames = 0
        appsrc = rtsp_media.get_element().get_child_by_name('source')
        appsrc.connect('need-data', self.on_need_data)

class GstServer(GstRtspServer.RTSPServer):
    def __init__(self, **properties):
        super(GstServer, self).__init__(**properties)
        self.factory = SensorFactory()
        self.factory.set_shared(True)
        self.get_mount_points().add_factory("/test", self.factory)
        self.attach(None)

if __name__ == "__main__":
    Gst.init(None)
    server = GstServer()
    loop = GLib.MainLoop()
    loop.run()
With this, playback becomes very choppy. When streaming 1080p video, the latency is as high as 30 seconds. How can I reduce the latency?
All of that DOES NOT NEED OpenCV at all.
You can do all of it with nothing but GStreamer.
Also, please write in English; this is an English-speaking forum.
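For reference, a rough sketch of that pure-GStreamer route (untested; it assumes the camera already delivers H.264, so the server just depayloads and re-payloads the stream without OpenCV or re-encoding, which also removes most of the latency):
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib

Gst.init(None)

factory = GstRtspServer.RTSPMediaFactory()
# Relay the camera's H.264 stream as-is; for a local webcam you would start from
# something like "v4l2src device=/dev/video0 ! videoconvert ! x264enc tune=zerolatency ..." instead
factory.set_launch(
    '( rtspsrc location="rtsp://admin:admin123@192.168.1.108:554/cam/realmonitor?channel=1&subtype=0" latency=0 '
    '! rtph264depay ! h264parse ! rtph264pay config-interval=1 name=pay0 pt=96 )'
)
factory.set_shared(True)

server = GstRtspServer.RTSPServer()
server.get_mount_points().add_factory('/test', factory)
server.attach(None)

GLib.MainLoop().run()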