Hi, I have previously asked how to stream video data using gstreamer in order to replace a Raspberry Pi with a Jetson Nano here.
After reading the provided solution and the other recommendations/info, I tried streaming video data (provided by a BR Low Light USB Camera) to a udpsink with gstreamer and then receiving it in Python code. The problem I have encountered is that when I run the Python program to receive the incoming data, the OpenCV output looks blocky and has a high delay (approx. 1.3 seconds). I thought this might be caused by sending the data over a UDP connection, so I removed the transport part and merged the two gstreamer commands into a single one for testing. When I ran the code, I encountered the same problem again. To fix it I tried building OpenCV with CUDA, tried out different BR cameras, tried out Raspberry Pi cameras, restarted the Jetson, and increased the swap size to 4 GB, but nothing has worked so far.
My main guesses as to why the video is blocky are (see the sketch after this list for how I plan to isolate the first two):
- The appsink may be causing a decrease in video quality, therefore causing the blockiness
- The gst_to_opencv function may be faulty
- The hardware may, in some way, be interfering with the video data
- GStreamer is having a problem streaming the camera data
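To check the first two guesses in isolation, here is a minimal sketch I plan to run (it reuses the same appsink settings and the same buffer-to-numpy conversion as the code below, but feeds them from a synthetic videotestsrc instead of the camera); if the test pattern comes out clean, the appsink and the conversion function are probably not the cause of the blockiness:

import cv2
import gi
import numpy as np
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
# Same videoconvert/BGR/appsink tail as the real pipeline, synthetic source
pipe = Gst.parse_launch(
    'videotestsrc ! videoconvert ! video/x-raw,format=(string)BGR '
    '! appsink name=sink emit-signals=true sync=false max-buffers=2 drop=true')
sink = pipe.get_by_name('sink')
pipe.set_state(Gst.State.PLAYING)

while True:
    sample = sink.emit('pull-sample')  # blocking pull, same action signal the callback uses
    buf = sample.get_buffer()
    caps = sample.get_caps()
    # Same buffer-to-numpy conversion as gst_to_opencv below
    frame = np.ndarray(
        (caps.get_structure(0).get_value('height'),
         caps.get_structure(0).get_value('width'),
         3),
        buffer=buf.extract_dup(0, buf.get_size()), dtype=np.uint8)
    cv2.imshow('testsrc', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break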
Here is ArduSub's example Python code, edited by me (nearly no difference):
import cv2
import gi
import numpy as np
gi.require_version('Gst', '1.0')
from gi.repository import Gst
print(cv2.cuda.getCudaEnabledDeviceCount())
class Video():
    """BlueRov video capture class constructor

    Attributes:
        port (int): Video UDP port
        video_codec (string): Source h264 parser
        video_decode (string): Transform YUV (12bits) to BGR (24bits)
        video_pipe (object): GStreamer top-level pipeline
        video_sink (object): GStreamer sink element
        video_sink_conf (string): Sink configuration
        video_source (string): Udp source ip and port
        Full original command: udpsrc port=5600 ! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264 ! decodebin ! videoconvert ! video/x-raw, format=(string)BGR ! videoconvert ! appsink emit-signals=true sync=false max-buffers=2 drop=true
    """

    def __init__(self):
        Gst.init(None)

        self.port = 5600
        self._frame = None

        # [Software component diagram](https://www.ardusub.com/software/components.html)
        # UDP video stream (:5600)
        self.video_source = 'udpsrc port={}'.format(self.port)
        # [Rasp raw image](http://picamera.readthedocs.io/en/release-0.7/recipes2.html#raw-image-capture-yuv-format)
        # Cam -> CSI-2 -> H264 Raw (YUV 4-4-4 (12bits) I420)
        self.video_codec = '! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264'
        # Python doesn't have a nibble type; convert YUV nibbles (4-4-4) to OpenCV standard BGR bytes (8-8-8)
        self.video_decode = \
            '! decodebin ! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert'
        # Create a sink to get data
        self.video_sink_conf = \
            '! appsink emit-signals=true sync=false max-buffers=2 drop=true'

        self.video_pipe = None
        self.video_sink = None

        self.run()
    def start_gst(self, config=None):
        """ Start gstreamer pipeline and sink

        Args:
            config (list, optional): GStreamer pipeline description list
        """
        config1 = 'v4l2src device=/dev/video1 do-timestamp=true ! video/x-h264, width=800, height=600, framerate=30/1 ! ' + \
            'h264parse ! queue ! rtph264pay config-interval=10 pt=96 ! application/x-rtp, payload=96 ! rtph264depay ! h264parse ' + \
            '! avdec_h264 ! decodebin ! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert ! appsink drop=true sync=false ' + \
            'max-buffers=2 emit-signals=true'
        # The config argument is ignored for this test; the merged (no-UDP) pipeline above is used instead
        self.video_pipe = Gst.parse_launch(config1)
        self.video_pipe.set_state(Gst.State.PLAYING)
        self.video_sink = self.video_pipe.get_by_name('appsink0')
    @staticmethod
    def gst_to_opencv(sample):
        """Transform a GStreamer sample into a numpy array

        Args:
            sample (Gst.Sample): Sample pulled from the appsink

        Returns:
            np.ndarray: height x width x 3 BGR image
        """
        buf = sample.get_buffer()
        caps = sample.get_caps()
        # Wrap a copy of the buffer bytes in an ndarray shaped from the caps
        array = np.ndarray(
            (
                caps.get_structure(0).get_value('height'),
                caps.get_structure(0).get_value('width'),
                3
            ),
            buffer=buf.extract_dup(0, buf.get_size()), dtype=np.uint8)
        return array
    def frame(self):
        """ Get frame

        Returns:
            np.ndarray: latest image frame
        """
        return self._frame

    def frame_available(self):
        """ Check if a frame has been received yet
        """
        return self._frame is not None
    def run(self):
        """ Start the pipeline and register the callback that updates _frame
        """
        self.start_gst(
            [
                self.video_source,
                self.video_codec,
                self.video_decode,
                self.video_sink_conf
            ])
        self.video_sink.connect('new-sample', self.callback)

    def callback(self, sink):
        sample = sink.emit('pull-sample')
        new_frame = self.gst_to_opencv(sample)
        self._frame = new_frame
        return Gst.FlowReturn.OK
if __name__ == '__main__':
    # Create the video object
    video = Video()

    while True:
        # Wait for the next frame
        if not video.frame_available():
            print("Frame not available")
            continue
        frame = video.frame()
        print("Frame available")
        cv2.imshow('frame', frame)
        if cv2.waitKey(35) & 0xFF == ord('q'):
            break
I have also tested this code on different hardware (i.e. on a Monster laptop, and on a different monitor with the same Jetson Nano plugged in) and did not encounter the same problem.
I have also used the following Python code to test whether the problem was launching gstreamer pipelines from Python (I did not encounter any problems):
import cv2
import gi
import numpy as np
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
print(cv2.cuda.getCudaEnabledDeviceCount())
Gst.init(None)
main = GLib.MainLoop()
config0 = "v4l2src device=/dev/video1 ! h264parse ! avdec_h264 ! video/x-raw, width=800, height=600, framerate=30/1 ! xvimagesink"
config1 = "v4l2src device=/dev/video1 do-timestamp=true ! video/x-h264, width=800, height=600, framerate=30/1 ! h264parse ! queue ! rtph264pay config-interval=10 pt=96 ! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264 ! decodebin ! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert ! xvimagesink"
pipeline = Gst.parse_launch(config0)
pipeline.set_state(Gst.State.PLAYING)
main.run()
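While that script is running, the pipeline's bus can also be watched for errors and warnings (a sketch using the standard Gst bus API, to be attached before main.run()); a warning during caps negotiation would show up here even when the video window itself looks fine:

# Attach before main.run() so GStreamer errors/warnings are printed
def on_message(bus, message):
    if message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print('GStreamer error: {} ({})'.format(err, debug))
    elif message.type == Gst.MessageType.WARNING:
        warn, debug = message.parse_warning()
        print('GStreamer warning: {} ({})'.format(warn, debug))

bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', on_message)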
I have also run the following commands to test whether there was an error on the gstreamer side (I did not encounter any quality decrease or blockiness):
# sender
gst-launch-1.0 v4l2src device=/dev/video1 do-timestamp=true ! video/x-h264, width=800, height=600, framerate=30/1 ! h264parse ! queue ! rtph264pay config-interval=10 pt=96 ! udpsink host=127.0.0.1 port=5600
# receiver
gst-launch-1.0 udpsrc port=5600 ! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264 ! decodebin ! videoconvert ! 'video/x-raw, format=(string)BGR' ! videoconvert ! xvimagesink
I would appreciate any explanations or solutions that may help me fix this problem.