gstreamer: overlay / alpha composite two VP9 WebM files

What command do I need to overlay or alpha-composite two VP9 WebM video files on top of each other in GStreamer?
Inputs:
foreground.webm
background.webm
Output:
foreground_over_background.webm
From reading the NVIDIA GStreamer documentation, page 46:-
https://developer.download.nvidia.com/embedded/L4T/r32_Release_v1.0/Docs/Accelerated_GStreamer_User_Guide.pdf?bfPUdrIaI1j8RVvuoCD405aKTnLM7iySjOBRny2F8HQScIv521n_UOFV0GbrblQAyN0mkVbRu1bWt9iY4l6xR7Tk5aI3wR-D8_YSvpUhWq1xqi6IrBZhuuZElW_n7i5wfLaPvko4MoqeXpjNTdYEqXNvbedG9a_leDcWNl-WK9QlceGJB-Q
I was hoping I could do it like this:-
from threading import Thread
from time import sleep  # needed for the sleep() call below

import gi
from gi.repository import Gst, GLib
gi.require_version("Gst", "1.0")
Gst.init()
main_loop = GLib.MainLoop()
thread = Thread(target=main_loop.run)
thread.start()
#pipeline = Gst.parse_launch("v4l2src ! decodebin ! videoconvert ! autovideosink")
pipeline = Gst.parse_launch("nvcompositor name=comp "
    "sink_0::xpos=0 sink_0::ypos=0 sink_0::width=188 sink_0::height=188 "
    "sink_1::xpos=0 sink_1::ypos=0 sink_1::width=188 sink_1::height=188 "
    "! nvoverlaysink display-id=1 "
    "filesrc location =<blend_text_188.webm> ! matroskademux | omxvp9dec ! comp. -e "
    "filesrc location =<gradient_188.webm> ! matroskademux | omxvp9dec ! comp. -e")
pipeline.set_state(Gst.State.PLAYING)
try:
    while True:
        sleep(0.1)
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
main_loop.quit()
However, I get this error message:-
.../gstreamer.py:3: PyGIWarning: Gst was imported without specifying a version first. Use gi.require_version('Gst', '1.0') before import to ensure that the right version gets loaded.
from gi.repository import Gst, GLib
Traceback (most recent call last):
File ".../gstreamer.py", line 10, in <module>
pipeline = Gst.parse_launch("nvcompositor name=comp "
gi.repository.GLib.GError: gst_parse_error: no element "nvcompositor" (1)
Any idea why I get this error, and what I need to do to get this overlay working?
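For context: nvcompositor, nvoverlaysink and omxvp9dec ship with NVIDIA's Jetson/L4T GStreamer packages, so a stock GStreamer install reports no element "nvcompositor". Note also that Gst.parse_launch syntax uses ! between elements, never |, and does not accept gst-launch flags such as -e. As a minimal sketch of a portable file-to-file overlay, assuming the stock compositor, vp9dec, vp9enc and webmmux plugins are installed (whether the foreground's alpha channel survives depends on how the alpha was encoded into the WebM):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

# Background on sink_0 (zorder 0), foreground on sink_1 (zorder 1, on top).
pipeline = Gst.parse_launch(
    "compositor name=comp sink_0::zorder=0 sink_1::zorder=1 "
    "! videoconvert ! vp9enc ! webmmux "
    "! filesink location=foreground_over_background.webm "
    "filesrc location=background.webm ! matroskademux ! vp9dec ! videoconvert ! comp.sink_0 "
    "filesrc location=foreground.webm ! matroskademux ! vp9dec ! videoconvert ! comp.sink_1")

pipeline.set_state(Gst.State.PLAYING)
# Block until EOS or an error, then shut down cleanly.
bus = pipeline.get_bus()
msg = bus.timed_pop_filtered(
    Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
if msg and msg.type == Gst.MessageType.ERROR:
    print(msg.parse_error())
pipeline.set_state(Gst.State.NULL)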
So, I tried it this way:-
from threading import Thread
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
import os
os.environ['GST_DEBUG'] = '3'
from gi.repository import Gst, GLib, GstRtspServer
Gst.init(None)
main_loop = GLib.MainLoop()
thread = Thread(target=main_loop.run)
thread.start()
port = "8554"
mount_point = "/test"
server = GstRtspServer.RTSPServer.new()
server.set_service(port)
mounts = server.get_mount_points()
factory = GstRtspServer.RTSPMediaFactory.new()
#pipeline = Gst.parse_launch("v4l2src ! decodebin ! videoconvert ! autovideosink")
factory.set_launch("nvcompositor name=comp "
    "sink_0::xpos=0 sink_0::ypos=0 sink_0::width=188 sink_0::height=188 "
    "sink_1::xpos=0 sink_1::ypos=0 sink_1::width=188 sink_1::height=188 "
    "! nvoverlaysink display-id=1 "
    "filesrc location =<blend_text_188.webm> ! matroskademux | omxvp9dec ! comp. -e "
    "filesrc location =<gradient_188.webm> ! matroskademux | omxvp9dec ! comp. -e")
#pipeline.set_state(Gst.State.PLAYING)
#try:
#    while True:
#        sleep(0.1)
#except KeyboardInterrupt:
#    pass
mounts.add_factory(mount_point, factory)
server.attach()
#pipeline.set_state(Gst.State.NULL)
#main_loop.quit()
But this also didn't do anything, unfortunately.
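A separate problem with the RTSP variant: a GstRtspServer media factory's launch string must end in RTP payloaders named pay0, pay1, ..., otherwise the server has no streams to serve; sending the composite to nvoverlaysink only paints a local display. A sketch of the required shape, with x264enc/rtph264pay chosen purely for illustration (their availability is an assumption):

factory.set_launch(
    "( compositor name=comp ! videoconvert "
    "! x264enc tune=zerolatency ! rtph264pay name=pay0 pt=96 "
    "filesrc location=background.webm ! matroskademux ! vp9dec ! comp.sink_0 "
    "filesrc location=foreground.webm ! matroskademux ! vp9dec ! comp.sink_1 )")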
I added this post onto the NVIDIA forum as well, here:-
https://forums.developer.nvidia.com/t/alpha-compositing-one-webm-file-directly-over-another-webm-file-using-gstreamer/228404

Related

PyGst / GStreamer not playing audio, commandline OK

I am new to GObject, GStreamer, GI, etc.
I have a Mac running High Sierra.
I am able to play a test audio file successfully from the command line, as below:
gst-launch-1.0 filesrc location=test.mp3 ! decodebin ! audioconvert ! autoaudiosink
but I am not able to do the same from Python code.
It returns the following error:
python ccc.py
<Gst.Message object at 0x10ebb59a8 (GstMessage at 0x7fde5688b740)>
<flags GST_MESSAGE_ERROR of type Gst.MessageType>
(gerror=GLib.Error('Internal data stream error.', 'gst-stream-error-quark', 1), debug='gstbaseparse.c(3611): void gst_base_parse_loop(GstPad *) (): /GstPipeline:pipeline0/GstDecodeBin:decodebin/GstMpegAudioParse:mpegaudioparse0:\nstreaming stopped, reason not-linked (-1)')
Code
#!/usr/bin/python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, GObject
from gi.repository import Gst as gst

# Initialize GObject/GStreamer
GObject.threads_init()
gst.init(None)

# Create the pipeline for our elements.
pipe = gst.Pipeline()
source = gst.ElementFactory.make("filesrc", "file-source")
source.set_property("location", "test.wav")
decoder = gst.ElementFactory.make("decodebin", "decodebin")
converter = gst.ElementFactory.make("audioconvert", "audioconvert")
audiosink = gst.ElementFactory.make("autoaudiosink", "audiosink")

# Ensure all elements were created successfully.
if (not pipe or not source or not decoder or not audiosink):
    print('Not all elements could be created.')
    exit(-1)

# Add elements to pipeline
pipe.add(source)
pipe.add(decoder)
pipe.add(converter)
pipe.add(audiosink)

# Link our elements together.
source.link(decoder)
decoder.link(converter)
converter.link(audiosink)

# Set our pipeline's state to PLAYING.
pipe.set_state(gst.State.PLAYING)

# Wait until error or EOS.
bus = pipe.get_bus()
msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MessageType.ERROR | gst.MessageType.EOS)
print msg
print msg.type
print msg.parse_error()

# Free resources.
pipe.set_state(gst.State.NULL)
Any pointers? Thanks.
Try using Gst.parse_launch to directly input your complete pipeline, the same way as you do on the command line.
E.g.:
PIPE = """ filesrc location=test.mp3 ! decodebin ! audioconvert ! autoaudiosink """
And then:
pipeline = Gst.parse_launch(PIPE)
This is much easier than creating each element individually and linking them into the pipeline.
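For reference, the reason the element-by-element version fails with reason not-linked is that decodebin's source pads are dynamic: they only appear once the stream has been typefound, so decoder.link(converter) at construction time links nothing. If you do want to build the pipeline by hand, the usual pattern is a pad-added callback, sketched here against the names in the question:

def on_pad_added(dbin, pad):
    # decodebin just exposed a src pad; link it to audioconvert's sink.
    sink_pad = converter.get_static_pad("sink")
    if not sink_pad.is_linked():
        pad.link(sink_pad)

decoder.connect("pad-added", on_pad_added)
# ...and remove the direct decoder.link(converter) call.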

How to change the currently playing file in a gstreamer pipeline without restarting the pipeline?

For some reason we have had trouble finding working solutions to this problem online.
Right now our source is a filesrc element. Our current non-solution is to change the pipeline's state to READY, change the location property, and set the pipeline's state to PLAYING (see the change_song method below).
This gives the following error:
CHANGING SONG TO music2.mp3
('ERROR:', 'filesource', ':', 'Internal data stream error.')
('debugging info:', 'gstbasesrc.c(2950): gst_base_src_loop (): /GstPipeline:pipeline0/GstFileSrc:filesource:\nstreaming stopped, reason not-linked (-1)')
We hear the first song playing, but the pipeline crashes when attempting to re-enter the PLAYING state. This is our code:
import gi
import time
import threading
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject, GLib

class Server:
    def __init__(self):
        self.pipeline = None
        bus = None
        message = None
        # initialize GStreamer
        Gst.init(None)
        # build the pipeline
        self.pipeline = Gst.parse_launch('filesrc name=filesource ! queue name=queueelement ! decodebin ! audioconvert ! audioresample ! opusenc ! rtpopuspay ! udpsink port=40401 host=224.1.1.1')
        self.filesrc = self.pipeline.get_by_name("filesource")
        self.filesrc.set_property("location", "music.mp3")
        self.queue = self.pipeline.get_by_name("queueelement")

    def hang(self):
        # wait until EOS or error
        bus = self.pipeline.get_bus()
        msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
        if msg:
            t = msg.type
            if t == Gst.MessageType.ERROR:
                err, dbg = msg.parse_error()
                print("ERROR:", msg.src.get_name(), ":", err.message)
                if dbg:
                    print("debugging info:", dbg)
            elif t == Gst.MessageType.EOS:
                print("End-Of-Stream reached")
            else:
                # this should not happen. we only asked for ERROR and EOS
                print("ERROR: Unexpected message received.")
        # free resources
        self.pipeline.set_state(Gst.State.NULL)

    def start(self):
        # start playing
        self.pipeline.set_state(Gst.State.PLAYING)
        t = threading.Thread(target=self.hang)
        t.start()

    def change_song(self, song_name):
        print("CHANGING SONG TO " + song_name)
        self.pipeline.set_state(Gst.State.READY)
        self.filesrc.set_property("location", song_name)
        self.pipeline.set_state(Gst.State.PLAYING)

s = Server()
s.start()
time.sleep(5)
s.change_song("music2.mp3")
We have also tried the following:
def change_song(self, song_name):
    print("CHANGING SONG TO " + song_name)
    self.pipeline.set_state(Gst.State.READY)
    self.filesrc.unlink(self.queue)
    self.filesrc.set_property("location", song_name)
    self.filesrc.link(self.queue)
    self.pipeline.set_state(Gst.State.PLAYING)
but this gives the same error.
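One way to sidestep manual relinking entirely is playbin, which tears down and rebuilds its internal decode chain itself, so switching tracks is just a uri change while it sits in READY. A sketch (untested against this exact setup) that reuses the question's RTP output chain as playbin's audio sink:

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

playbin = Gst.ElementFactory.make("playbin", None)
# Wrap the question's output chain in a bin; True ghost-pads the unlinked sink.
sink_bin = Gst.parse_bin_from_description(
    "audioconvert ! audioresample ! opusenc ! rtpopuspay "
    "! udpsink port=40401 host=224.1.1.1", True)
playbin.set_property("audio-sink", sink_bin)

def change_song(path):
    playbin.set_state(Gst.State.READY)   # flushes the old stream
    playbin.set_property("uri", Gst.filename_to_uri(path))
    playbin.set_state(Gst.State.PLAYING)

change_song("music.mp3")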

video streaming using gstreamer with python

I am new to GStreamer and want to live-stream from a webcam using GStreamer with Python.
How can I do it?
If anyone has a related article or source code, please share.
I have tried this code:
Vidserver.py
import gobject, pygst
pygst.require("0.10")
import gst

# source pad
def new_decode_pad(dbin, pad, islast):
    pad.link(decode.get_pad("xvimagesink"))

pipeline = gst.Pipeline("server")
tcpsrc = gst.element_factory_make("tcpserversrc", "source")
pipeline.add(tcpsrc)
tcpsrc.set_property("host", "localhost")
tcpsrc.set_property("port", 5000)
decode = gst.element_factory_make("decodebin", "decode")
decode.connect("new-decoded-pad", new_decode_pad)
pipeline.add(decode)
tcpsrc.link(decode)

def new_xvimage_pad(ximg, pad, islast):
    pass  # callback body missing in the original

xvimage = gst.element_factory_make("xvimagesink", "imagesink")
xvimage.connect("new-xvimaged-pad", new_xvimage_pad)
pipeline.add(xvimage)
decode.link(xvimage)

pipeline.set_state(gst.STATE_PLAYING)
loop = gobject.MainLoop()
loop.run()
Vidclient.py
import gobject, pygst
pygst.require("0.10")
import gst

# create a pipeline and add [ filesrc ! tcpclientsink ]
pipeline = gst.Pipeline("client")
src = gst.element_factory_make("filesrc", "source")
src.set_property("location", "/home/sam/Desktop/ramstien.mpg")
pipeline.add(src)

def new_mux_pad(mux, pad, islast):
    pad.link(mux.get_pad("mpeg"))

mux = gst.element_factory_make("dvddemux", "mux")
mux.connect("new-muxed-pad", new_mux_pad)
pipeline.add(mux)
src.link(mux)

def new_mpeg_pad(mpg, pad, islast):
    pad.link(mpeg.get_pad("theora"))

mpeg = gst.element_factory_make("mpeg2dec", "mpeg")
mpeg.connect("new-mpeged-pad", new_mpeg_pad)
pipeline.add(mpeg)
mux.link(mpeg)

def new_theoraenc_pad(thr, pad, islast):
    pad.link(theora.get_pad("ogg"))

theora = gst.element_factory_make("theoraenc", "theora")
theora.connect("new-theoraenced-pad", new_theoraenc_pad)
pipeline.add(theora)
mpeg.link(theora)

def new_ogg_pad(og, pad, islast):
    pass  # callback body missing in the original

ogg = gst.element_factory_make("oggmux", "ogg")
ogg.connect("new-ogged-pad", new_ogg_pad)
pipeline.add(ogg)
theora.link(ogg)

client = gst.element_factory_make("tcpclientsink", "client")
pipeline.add(client)
client.set_property("host", "localhost")
client.set_property("port", 5000)
ogg.link(client)

pipeline.set_state(gst.STATE_PLAYING)

# enter into a mainloop
loop = gobject.MainLoop()
loop.run()
Can anyone please guide me on what the Python code would be for this GStreamer server command:
gst-launch-1.0 -v ximagesrc use-damage=false xname=/usr/lib/torcs/torcs-bin ! videoconvert ! videoscale ! video/x-raw,format=I420,width=800,height=600,framerate=25/1 ! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000
and this GStreamer client command:
gst-launch-1.0 udpsrc port=5000 ! application/x-rtp,encoding-name=JPEG,payload=26 ! rtpjpegdepay ! jpegdec ! autovideosink
for MJPEG encoding?
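Both of those command lines can be handed to Gst.parse_launch unchanged; a minimal sketch that runs either side depending on an argument (start the server in one process, the client in another):

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

SERVER = ("ximagesrc use-damage=false xname=/usr/lib/torcs/torcs-bin "
          "! videoconvert ! videoscale "
          "! video/x-raw,format=I420,width=800,height=600,framerate=25/1 "
          "! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000")
CLIENT = ("udpsrc port=5000 "
          "! application/x-rtp,encoding-name=JPEG,payload=26 "
          "! rtpjpegdepay ! jpegdec ! autovideosink")

pipeline = Gst.parse_launch(SERVER if "server" in sys.argv else CLIENT)
pipeline.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()  # Ctrl-C to stop
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)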

GStreamer 1.0 - mixing audio w/adder or audiomixer renders no output

Any help would be greatly appreciated since I've been at this for two days. The code looks fine to me, but obviously I'm doing something incorrectly. I've tried adder on 1.2.4 and audiomixer on 1.7.1. Debug logs say the pipeline and its elements are in the PAUSED state when the code exits.
#!/usr/bin/python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject
from gi.repository import Gst as gst

GObject.threads_init()
gst.init(None)

if __name__ == "__main__":
    # First create our pipeline
    # filesrc ! mad ! audioconvert ! audioresample
    pipe = gst.Pipeline("mypipe")

    # Create the file source 1
    filesrc1 = gst.ElementFactory.make("filesrc", "filesrc1")
    filesrc1.set_property('location', "test1.mp3")
    pipe.add(filesrc1)
    mad_convert1 = gst.ElementFactory.make("mad", "mad_convert1")
    pipe.add(mad_convert1)
    filesrc1.link(mad_convert1)
    audio_convert1 = gst.ElementFactory.make("audioconvert", "audio_convert1")
    pipe.add(audio_convert1)
    mad_convert1.link(audio_convert1)
    audio_resample1 = gst.ElementFactory.make("audioresample", "audio_resample1")
    pipe.add(audio_resample1)
    audio_convert1.link(audio_resample1)

    # Create the file source 2
    filesrc2 = gst.ElementFactory.make("filesrc", "filesrc2")
    filesrc2.set_property('location', "test2.mp3")
    pipe.add(filesrc2)
    mad_convert2 = gst.ElementFactory.make("mad", "mad_convert2")
    pipe.add(mad_convert2)
    filesrc2.link(mad_convert2)
    audio_convert2 = gst.ElementFactory.make("audioconvert", "audio_convert2")
    pipe.add(audio_convert2)
    mad_convert2.link(audio_convert2)
    audio_resample2 = gst.ElementFactory.make("audioresample", "audio_resample2")
    pipe.add(audio_resample2)
    audio_convert2.link(audio_resample2)

    # Create a software mixer with "adder"/"audiomixer"
    adder = gst.ElementFactory.make("audiomixer", "audiomixer")
    pipe.add(adder)

    # Gather a request sink pad on the mixer
    sinkpad1 = adder.get_request_pad("sink_%u")
    # Get another request sink pad on the mixer
    sinkpad2 = adder.get_request_pad("sink_%u")
    # .. and connect each resampler's source pad to a request pad
    audio_resample1_src = audio_resample1.get_static_pad("src")
    audio_resample1_src.link(sinkpad1)
    audio_resample2_src = audio_resample2.get_static_pad("src")
    audio_resample2_src.link(sinkpad2)

    # Add some output
    audio_convert_out = gst.ElementFactory.make("audioconvert", "audio_convert_out")
    pipe.add(audio_convert_out)
    output = gst.ElementFactory.make("filesink", "audio_out")
    output.set_property('location', "test_out.mp3")
    pipe.add(output)
Your output would not be MP3 but raw audio, since you don't re-encode after mixing.
Also, I'd recommend checking the return values of all the link() calls.
And most importantly, you never set the pipeline to PLAYING.
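Concretely, a sketch of the missing tail using the names from the code above (lamemp3enc is an assumption about which MP3 encoder is installed):

    # Re-encode the mixed raw audio to MP3 before the filesink.
    mp3enc = gst.ElementFactory.make("lamemp3enc", "mp3enc")
    pipe.add(mp3enc)

    assert adder.link(audio_convert_out)   # mixer -> audioconvert
    assert audio_convert_out.link(mp3enc)  # audioconvert -> MP3 encoder
    assert mp3enc.link(output)             # encoder -> filesink

    pipe.set_state(gst.State.PLAYING)      # the missing state change
    bus = pipe.get_bus()
    bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
                           gst.MessageType.ERROR | gst.MessageType.EOS)
    pipe.set_state(gst.State.NULL)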

gstreamer - Wadsworth's constant thumbnailer

I'm trying to build a video thumbnailer using gst-python; it looks like this:
from __future__ import division
import sys
import logging
import pdb

_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)

try:
    import gobject
    gobject.threads_init()
except:
    raise Exception('gobject could not be found')

try:
    import pygst
    pygst.require('0.10')
    import gst
    from gst import pbutils
    from gst.extend import discoverer
except:
    raise Exception('gst/pygst 0.10 could not be found')

class VideoThumbnailer:
    '''
    Creates a video thumbnail
    - Sets up discoverer & transcoding pipeline.
      Discoverer finds out information about the media file
    - Launches gobject.MainLoop, this triggers the discoverer to start running
    - Once the discoverer is done, it calls the __discovered callback function
    - The __discovered callback function launches the transcoding process
    - The _on_message callback is called from the transcoding process until it
      gets a message of type gst.MESSAGE_EOS, then it calls __stop which shuts
      down the gobject.MainLoop
    '''
    def __init__(self, src, dst, **kwargs):
        _log.info('Initializing VideoThumbnailer...')
        # Set instance variables
        self.loop = gobject.MainLoop()
        self.source_path = src
        self.destination_path = dst
        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)
        if not type(self.destination_dimensions) == tuple:
            raise Exception('dimensions must be tuple: (width, height)')
        # Run setup
        self._setup()
        # Run.
        self._run()

    def _setup(self):
        self._setup_pipeline()
        self._setup_discover()

    def _run(self):
        _log.info('Discovering...')
        self.discoverer.discover()
        _log.info('Done')
        _log.debug('Initializing MainLoop()')
        self.loop.run()

    def _setup_discover(self):
        self.discoverer = discoverer.Discoverer(self.source_path)
        # Connect self.__discovered to the 'discovered' event
        self.discoverer.connect('discovered', self.__discovered)

    def __discovered(self, data, is_media):
        '''
        Callback for media discoverer.
        '''
        if not is_media:
            self.__stop()
            raise Exception('Could not discover {0}'.format(self.source_path))
        _log.debug('__discovered, data: {0}'.format(data))
        self.data = data
        # Run any tasks that depend on the info from the discovery
        self._on_discovered()
        # Tell the transcoding pipeline to start running
        #self.pipeline.set_state(gst.STATE_PLAYING)
        _log.info('Transcoding...')

    def _on_discovered(self):
        self.__setup_capsfilter()

    def _setup_pipeline(self):
        # Create a new pipeline
        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')
        # Create the elements in the pipeline
        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)
        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)
        self.ffmpegcolorspace = gst.element_factory_make(
            'ffmpegcolorspace', 'ffmpegcolorspace')
        self.pipeline.add(self.ffmpegcolorspace)
        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
        self.videoscale.set_property('method', 'bilinear')
        self.pipeline.add(self.videoscale)
        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)
        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
        self.pipeline.add(self.jpegenc)
        self.filesink = gst.element_factory_make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)
        # Link all the elements together
        self.filesrc.link(self.decoder)
        self.ffmpegcolorspace.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.jpegenc)
        self.jpegenc.link(self.filesink)
        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad, islast):
        '''
        Callback called when ``decodebin2`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have common capabilities.
        if not self.ffmpegcolorspace.get_pad_template('sink')\
                .get_caps().intersect(pad.get_caps()).is_empty():
            # It IS a video src pad.
            pad.link(self.ffmpegcolorspace.get_pad('sink'))

    def _setup_bus(self):
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_capsfilter(self):
        caps = ['video/x-raw-rgb']
        if self.data.videoheight > self.data.videowidth:
            # Whoa! We have ourselves a portrait video!
            caps.append('height={0}'.format(
                self.destination_dimensions[1]))
        else:
            # It's a landscape, phew, how normal.
            caps.append('width={0}'.format(
                self.destination_dimensions[0]))
        self.capsfilter.set_property(
            'caps',
            gst.caps_from_string(
                ', '.join(caps)))

    def _on_message(self, bus, message):
        _log.debug((bus, message))
        t = message.type
        if t == gst.MESSAGE_EOS:
            self.__stop()
            _log.info('Done')
        elif t == gst.MESSAGE_ERROR:
            _log.error((bus, message))
            self.__stop()

    def __stop(self):
        _log.debug(self.loop)
        self.pipeline.set_state(gst.STATE_NULL)
        gobject.idle_add(self.loop.quit)
What it does
filesrc loads a video file
decodebin2 demuxes the video file, connects the video src pad to the ffmpegcolorspace sink
ffmpegcolorspace does whatever it does with the color spaces of the video stream
videoscale scales the video
capsfilter tells videoscale to make the video fit in a 180x180 box
jpegenc captures a single frame
filesink saves the jpeg file
What I want it to do
filesrc loads a video file
decodebin2 demuxes the video file, connects the video src pad to the ffmpegcolorspace sink
ffmpegcolorspace does whatever it does with the color spaces of the video stream
videoscale scales the video
capsfilter tells videoscale to make the video fit in a 180x180 box
jpegenc captures a single frame AT 30% INTO THE VIDEO
filesink saves the jpeg file
I've tried with
self.decoder.seek_simple(
    gst.FORMAT_PERCENT,
    gst.SEEK_FLAG_FLUSH,
    self.WADSWORTH_CONSTANT)  # int(30)
placed in _on_dynamic_pad, after pad linking, alas to no avail.
This is now implemented in the VideoThumbnailer class in https://github.com/jwandborg/mediagoblin/blob/video_gstreamer-only/mediagoblin/media_types/video/transcoders.py#L53.
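For anyone adapting this: the percent seek likely does nothing because it runs before the pipeline has prerolled, and it targets decodebin rather than the whole pipeline. A sketch of the usual pattern in the same 0.10 API, seeking by time instead of percent once PAUSED is reached:

# Preroll first so duration is queryable and the seek can be handled.
self.pipeline.set_state(gst.STATE_PAUSED)
self.pipeline.get_state()  # blocks until the state change completes
duration = self.pipeline.query_duration(gst.FORMAT_TIME)[0]
# Jump 30% in, then run so jpegenc encodes that frame.
self.pipeline.seek_simple(
    gst.FORMAT_TIME,
    gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
    int(duration * 0.3))
self.pipeline.set_state(gst.STATE_PLAYING)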