I'm trying to build a video thumbnailer using gst-python. It looks like this:
from __future__ import division
import sys
import logging
import pdb
_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)
try:
import gobject
gobject.threads_init()
except ImportError:
    raise Exception('gobject could not be found')
try:
import pygst
pygst.require('0.10')
import gst
from gst import pbutils
from gst.extend import discoverer
except ImportError:
    raise Exception('gst/pygst 0.10 could not be found')
class VideoThumbnailer:
'''
Creates a video thumbnail
- Sets up discoverer & transcoding pipeline.
Discoverer finds out information about the media file
- Launches gobject.MainLoop, this triggers the discoverer to start running
- Once the discoverer is done, it calls the __discovered callback function
- The __discovered callback function launches the transcoding process
- The _on_message callback is called from the transcoding process until it
gets a message of type gst.MESSAGE_EOS, then it calls __stop which shuts
down the gobject.MainLoop
'''
def __init__(self, src, dst, **kwargs):
_log.info('Initializing VideoThumbnailer...')
# Set instance variables
self.loop = gobject.MainLoop()
self.source_path = src
self.destination_path = dst
self.destination_dimensions = kwargs.get('dimensions') or (180, 180)
        if not isinstance(self.destination_dimensions, tuple):
            raise Exception('dimensions must be a tuple: (width, height)')
# Run setup
self._setup()
# Run.
self._run()
def _setup(self):
self._setup_pipeline()
self._setup_discover()
def _run(self):
_log.info('Discovering...')
self.discoverer.discover()
_log.info('Done')
_log.debug('Initializing MainLoop()')
self.loop.run()
def _setup_discover(self):
self.discoverer = discoverer.Discoverer(self.source_path)
# Connect self.__discovered to the 'discovered' event
self.discoverer.connect('discovered', self.__discovered)
def __discovered(self, data, is_media):
'''
Callback for media discoverer.
'''
if not is_media:
self.__stop()
raise Exception('Could not discover {0}'.format(self.source_path))
_log.debug('__discovered, data: {0}'.format(data))
self.data = data
# Run any tasks that depend on the info from the discovery
self._on_discovered()
# Tell the transcoding pipeline to start running
#self.pipeline.set_state(gst.STATE_PLAYING)
_log.info('Transcoding...')
def _on_discovered(self):
self.__setup_capsfilter()
def _setup_pipeline(self):
# Create a new pipeline
self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')
# Create the elements in the pipeline
self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
self.filesrc.set_property('location', self.source_path)
self.pipeline.add(self.filesrc)
self.decoder = gst.element_factory_make('decodebin2', 'decoder')
self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
self.pipeline.add(self.decoder)
self.ffmpegcolorspace = gst.element_factory_make(
'ffmpegcolorspace', 'ffmpegcolorspace')
self.pipeline.add(self.ffmpegcolorspace)
self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
self.videoscale.set_property('method', 'bilinear')
self.pipeline.add(self.videoscale)
self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
self.pipeline.add(self.capsfilter)
self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
self.pipeline.add(self.jpegenc)
self.filesink = gst.element_factory_make('filesink', 'filesink')
self.filesink.set_property('location', self.destination_path)
self.pipeline.add(self.filesink)
# Link all the elements together
self.filesrc.link(self.decoder)
self.ffmpegcolorspace.link(self.videoscale)
self.videoscale.link(self.capsfilter)
self.capsfilter.link(self.jpegenc)
self.jpegenc.link(self.filesink)
self._setup_bus()
def _on_dynamic_pad(self, dbin, pad, islast):
'''
Callback called when ``decodebin2`` has a pad that we can connect to
'''
# Intersect the capabilities of the video sink and the pad src
# Then check if they have common capabilities.
if not self.ffmpegcolorspace.get_pad_template('sink')\
.get_caps().intersect(pad.get_caps()).is_empty():
# It IS a video src pad.
pad.link(self.ffmpegcolorspace.get_pad('sink'))
def _setup_bus(self):
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message', self._on_message)
def __setup_capsfilter(self):
caps = ['video/x-raw-rgb']
if self.data.videoheight > self.data.videowidth:
# Whoa! We have ourselves a portrait video!
caps.append('height={0}'.format(
self.destination_dimensions[1]))
else:
# It's a landscape, phew, how normal.
caps.append('width={0}'.format(
self.destination_dimensions[0]))
self.capsfilter.set_property(
'caps',
gst.caps_from_string(
', '.join(caps)))
def _on_message(self, bus, message):
_log.debug((bus, message))
t = message.type
if t == gst.MESSAGE_EOS:
self.__stop()
_log.info('Done')
elif t == gst.MESSAGE_ERROR:
_log.error((bus, message))
self.__stop()
def __stop(self):
_log.debug(self.loop)
self.pipeline.set_state(gst.STATE_NULL)
gobject.idle_add(self.loop.quit)
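For reference, constructing the class is enough to run the whole process, since __init__ calls _setup() and _run() itself (the file names here are hypothetical):

thumbnailer = VideoThumbnailer('/tmp/input.ogv', '/tmp/thumb.jpg',
                               dimensions=(180, 180))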
What it does
filesrc loads a video file
decodebin2 demuxes the video file, connects the video src pad to the ffmpegcolorspace sink
ffmpegcolorspace does whatever it does with the color spaces of the video stream
videoscale scales the video
capsfilter tells videoscale to make the video fit in a 180x180 box
jpegenc captures a single frame
filesink saves the jpeg file
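For reference, a roughly equivalent gst-launch-0.10 line (a sketch with hypothetical file names, hard-coding the landscape case) would be:

gst-launch-0.10 filesrc location=input.ogv ! decodebin2 ! ffmpegcolorspace ! videoscale method=bilinear ! "video/x-raw-rgb,width=180" ! jpegenc ! filesink location=thumb.jpg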
What I want it to do
filesrc loads a video file
decodebin2 demuxes the video file, connects the video src pad to the ffmpegcolorspace sink
ffmpegcolorspace does whatever it does with the color spaces of the video stream
videoscale scales the video
capsfilter tells videoscale to make the video fit in a 180x180 box
jpegenc captures a single frame AT 30% INTO THE VIDEO
filesink saves the jpeg file
I've tried with
self.decoder.seek_simple(
gst.FORMAT_PERCENT,
gst.SEEK_FLAG_FLUSH,
self.WADSWORTH_CONSTANT) # int(30)
placed in _on_dynamic_pad, after pad linking, alas to no avail.
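For what it's worth, seeks are normally only honoured once the pipeline has prerolled, so one approach (an untested sketch, which assumes the discoverer's self.data.videolength is the duration in nanoseconds) would be a method on the class that pauses, waits for the state change to finish, seeks on the pipeline in time format, and only then starts playing:

def _seek_to_30_percent(self):
    # Preroll first; a seek on a pipeline that has not reached
    # PAUSED is usually ignored.
    self.pipeline.set_state(gst.STATE_PAUSED)
    self.pipeline.get_state()  # blocks until the state change completes
    # videolength comes from the discoverer, in nanoseconds (assumption)
    target = int(self.data.videolength * 0.30)
    self.pipeline.seek_simple(
        gst.FORMAT_TIME,
        gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
        target)
    self.pipeline.set_state(gst.STATE_PLAYING)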
This is now implemented in the VideoThumbnailer class in https://github.com/jwandborg/mediagoblin/blob/video_gstreamer-only/mediagoblin/media_types/video/transcoders.py#L53.
I am new to GObject, GStreamer, GI, etc.
I have a Mac running High Sierra.
I am able to play a test audio file successfully with the following command:
gst-launch-1.0 filesrc location=test.mp3 ! decodebin ! audioconvert ! autoaudiosink
However, I am not able to reproduce the same from Python code.
It returns the following error:
python ccc.py
<Gst.Message object at 0x10ebb59a8 (GstMessage at 0x7fde5688b740)>
<flags GST_MESSAGE_ERROR of type Gst.MessageType>
(gerror=GLib.Error('Internal data stream error.', 'gst-stream-error-quark', 1), debug='gstbaseparse.c(3611): void gst_base_parse_loop(GstPad *) (): /GstPipeline:pipeline0/GstDecodeBin:decodebin/GstMpegAudioParse:mpegaudioparse0:\nstreaming stopped, reason not-linked (-1)')
Code
#!/usr/bin/python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, GObject
from gi.repository import Gst as gst
# Initialize GObject threading and GStreamer
GObject.threads_init()
gst.init(None)
# Create the pipeline for our elements.
pipe = gst.Pipeline()
source = gst.ElementFactory.make("filesrc", "file-source")
source.set_property("location", "test.wav")
decoder = gst.ElementFactory.make("decodebin","decodebin")
converter = gst.ElementFactory.make("audioconvert","audioconvert")
audiosink = gst.ElementFactory.make("autoaudiosink", "audiosink")
# Ensure all elements were created successfully.
if (not pipe or not source or not decoder or not converter or not audiosink):
print('Not all elements could be created.')
exit(-1)
#Add elements to pipeline
pipe.add(source)
pipe.add(decoder)
pipe.add(converter)
pipe.add(audiosink)
#Link our elements together.
source.link(decoder)
decoder.link(converter)
converter.link(audiosink)
# Set our pipeline's state to PLAYING.
pipe.set_state(gst.State.PLAYING)
# Wait until error or EOS.
bus = pipe.get_bus()
msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,gst.MessageType.ERROR | gst.MessageType.EOS)
print(msg)
print(msg.type)
print(msg.parse_error())
# Free resources.
pipe.set_state(gst.State.NULL)
Any pointers? Thanks.
Try using Gst.parse_launch to input your complete pipeline directly, the same way you do on the command line.
E.g.:
PIPE = """ filesrc location=test.mp3 ! decodebin ! audioconvert ! autoaudiosink """
And then:
pipeline = Gst.parse_launch(PIPE)
This is much easier than adding the elements individually and linking them into the pipeline.
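Putting that together with the bus handling from the question, a minimal sketch (assuming test.mp3 is in the working directory) looks like:

#!/usr/bin/python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

# parse_launch wires up decodebin's dynamic pads for us, which is
# what the manual decoder.link(converter) call could not do.
pipeline = Gst.parse_launch(
    'filesrc location=test.mp3 ! decodebin ! audioconvert ! autoaudiosink')
pipeline.set_state(Gst.State.PLAYING)

# Wait until error or EOS, then clean up.
bus = pipeline.get_bus()
msg = bus.timed_pop_filtered(
    Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
if msg and msg.type == Gst.MessageType.ERROR:
    print(msg.parse_error())
pipeline.set_state(Gst.State.NULL)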
We want to change the file that a running pipeline is playing, and for some reason we have had trouble finding working solutions to this problem online.
Right now our source is a filesrc element. Our current non-solution is to change the pipeline's state to READY, change the location property, and set the pipeline's state back to PLAYING (see the change_song method below).
This gives the following error:
CHANGING SONG TO music2.mp3
('ERROR:', 'filesource', ':', 'Internal data stream error.')
('debugging info:', 'gstbasesrc.c(2950): gst_base_src_loop (): /GstPipeline:pipeline0/GstFileSrc:filesource:\nstreaming stopped, reason not-linked (-1)')
We hear the first song playing, but the pipeline crashes when attempting to re-enter the PLAYING state. This is our code:
import gi
import time
import threading
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject, GLib
class Server:
def __init__(self):
self.pipeline = None
bus = None
message = None
# initialize GStreamer
Gst.init(None)
# build the pipeline
self.pipeline = Gst.parse_launch('filesrc name=filesource ! queue name=queueelement ! decodebin ! audioconvert ! audioresample ! opusenc ! rtpopuspay ! udpsink port=40401 host=224.1.1.1')
self.filesrc = self.pipeline.get_by_name("filesource")
self.filesrc.set_property("location", "music.mp3")
self.queue = self.pipeline.get_by_name("queueelement")
def hang(self):
# wait until EOS or error
bus = self.pipeline.get_bus()
msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
if msg:
t = msg.type
if t == Gst.MessageType.ERROR:
err, dbg = msg.parse_error()
print("ERROR:", msg.src.get_name(), ":", err.message)
if dbg:
print("debugging info:", dbg)
elif t == Gst.MessageType.EOS:
print("End-Of-Stream reached")
else:
# this should not happen. we only asked for ERROR and EOS
print("ERROR: Unexpected message received.")
# free resources
self.pipeline.set_state(Gst.State.NULL)
def start(self):
# start playing
self.pipeline.set_state(Gst.State.PLAYING)
t = threading.Thread(target=self.hang)
t.start()
def change_song(self, song_name):
print("CHANGING SONG TO " + song_name)
self.pipeline.set_state(Gst.State.READY)
self.filesrc.set_property("location", song_name)
self.pipeline.set_state(Gst.State.PLAYING)
s = Server()
s.start()
time.sleep(5)
s.change_song("music2.mp3")
We have also tried the following:
def change_song(self, song_name):
print("CHANGING SONG TO " + song_name)
self.pipeline.set_state(Gst.State.READY)
        self.filesrc.unlink(self.queue)
        self.filesrc.set_property("location", song_name)
        self.filesrc.link(self.queue)
self.pipeline.set_state(Gst.State.PLAYING)
but this gives the same error.
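If going to READY isn't enough, one heavier-handed workaround (an untested sketch, not a confirmed fix) is to take the pipeline all the way down to NULL before swapping the location, so that decodebin's dynamically created pads are torn down and renegotiated for the new file:

    def change_song(self, song_name):
        print("CHANGING SONG TO " + song_name)
        # NULL, unlike READY, discards all negotiated pads and caps,
        # so decodebin relinks from scratch for the new file.
        self.pipeline.set_state(Gst.State.NULL)
        self.filesrc.set_property("location", song_name)
        self.pipeline.set_state(Gst.State.PLAYING)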
I am new to GStreamer and want to live-stream from a webcam using GStreamer with Python.
How can I do it?
If anyone has a related article or source code, please share.
I have tried this code:
Vidserver.py
import gobject, pygst
pygst.require("0.10")
import gst
# Callback for decodebin's dynamically created source pads; xvimage is
# created below and exists by the time the pipeline starts playing.
def new_decode_pad(dbin, pad, islast):
    # link only video pads to the video sink
    if pad.get_caps().to_string().startswith('video/'):
        pad.link(xvimage.get_pad("sink"))
pipeline = gst.Pipeline("server")
tcpsrc = gst.element_factory_make("tcpserversrc", "source")
pipeline.add(tcpsrc)
tcpsrc.set_property("host", "localhost")
tcpsrc.set_property("port", 5000)
decode = gst.element_factory_make("decodebin", "decode")
decode.connect("new-decoded-pad", new_decode_pad)
pipeline.add(decode)
tcpsrc.link(decode)
# xvimagesink has a static sink pad; it is linked from new_decode_pad
# above once decodebin exposes its video pad, so no static link here.
xvimage = gst.element_factory_make("xvimagesink", "imagesink")
pipeline.add(xvimage)
pipeline.set_state(gst.STATE_PLAYING)
loop = gobject.MainLoop()
loop.run()
Vidclient.py
import gobject, pygst
pygst.require("0.10")
import gst
# create a pipeline and add [ filesrc ! tcpclientsink ]
pipeline = gst.Pipeline("client")
src = gst.element_factory_make("filesrc", "source")
src.set_property("location", "/home/sam/Desktop/ramstien.mpg")
pipeline.add(src)
# dvddemux creates its source pads dynamically; link the video pad to
# the MPEG decoder once it appears.
def demux_pad_added(demux, pad):
    if pad.get_caps().to_string().startswith('video/'):
        pad.link(mpeg.get_pad('sink'))

mux = gst.element_factory_make("dvddemux", "mux")
mux.connect("pad-added", demux_pad_added)
pipeline.add(mux)
src.link(mux)

# The rest of the chain has only static (or request) pads, so plain
# link() calls are enough.
mpeg = gst.element_factory_make("mpeg2dec", "mpeg")
pipeline.add(mpeg)
theora = gst.element_factory_make("theoraenc", "theora")
pipeline.add(theora)
mpeg.link(theora)
ogg = gst.element_factory_make("oggmux", "ogg")
pipeline.add(ogg)
theora.link(ogg)  # oggmux's sink pads are request pads; link() requests one
client = gst.element_factory_make("tcpclientsink", "client")
pipeline.add(client)
client.set_property("host", "localhost")
client.set_property("port", 5000)
ogg.link(client)
pipeline.set_state(gst.STATE_PLAYING)
# enter into a mainloop
loop = gobject.MainLoop()
loop.run()
Can anyone please guide me on what the Python code would be for the GStreamer server command
gst-launch-1.0 -v ximagesrc use-damage=false xname=/usr/lib/torcs/torcs-bin ! videoconvert ! videoscale ! video/x-raw,format=I420,width=800,height=600,framerate=25/1 ! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000
and the GStreamer client command
gst-launch-1.0 udpsrc port=5000 ! application/x-rtp,encoding-name=JPEG,payload=26 ! rtpjpegdepay ! jpegdec ! autovideosink
for MJPEG encoding?
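Since both commands are plain launch lines, the least error-prone translation (a hedged sketch; the xname, host, and port values are taken verbatim from the commands above) is to feed them to Gst.parse_launch and spin a main loop:

#!/usr/bin/python
# server.py -- the same pattern works for the client pipeline
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

SERVER_PIPE = ('ximagesrc use-damage=false xname=/usr/lib/torcs/torcs-bin ! '
               'videoconvert ! videoscale ! '
               'video/x-raw,format=I420,width=800,height=600,framerate=25/1 ! '
               'jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000')
CLIENT_PIPE = ('udpsrc port=5000 ! '
               'application/x-rtp,encoding-name=JPEG,payload=26 ! '
               'rtpjpegdepay ! jpegdec ! autovideosink')

pipeline = Gst.parse_launch(SERVER_PIPE)  # or CLIENT_PIPE in the client
pipeline.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()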
Any help would be greatly appreciated, since I've been at this for two days. The code looks fine, but obviously I'm doing something incorrectly. I've tried adder on 1.2.4 and audiomixer on 1.7.1. Debug logs say the pipeline and its elements are in the paused state when the code exits.
#!/usr/bin/python
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject
from gi.repository import Gst as gst
GObject.threads_init()
gst.init(None)
if __name__ == "__main__":
# First create our pipeline
    # filesrc ! mad ! audioconvert ! audioresample
    pipe = gst.Pipeline("mypipe")
# Create the file source 1
filesrc1 = gst.ElementFactory.make("filesrc","filesrc1")
filesrc1.set_property('location', "test1.mp3")
pipe.add(filesrc1)
mad_convert1 = gst.ElementFactory.make("mad","mad_convert1")
pipe.add(mad_convert1)
filesrc1.link(mad_convert1)
audio_convert1 = gst.ElementFactory.make("audioconvert","audio_convert1")
pipe.add(audio_convert1)
mad_convert1.link(audio_convert1)
audio_resample1 = gst.ElementFactory.make("audioresample","audio_resample1")
pipe.add(audio_resample1)
audio_convert1.link(audio_resample1)
# Create the file source 2
filesrc2 = gst.ElementFactory.make("filesrc","filesrc2")
filesrc2.set_property('location', "test2.mp3")
pipe.add(filesrc2)
mad_convert2 = gst.ElementFactory.make("mad","mad_convert2")
    pipe.add(mad_convert2)
filesrc2.link(mad_convert2)
audio_convert2 = gst.ElementFactory.make("audioconvert","audio_convert2")
pipe.add(audio_convert2)
mad_convert2.link(audio_convert2)
audio_resample2 = gst.ElementFactory.make("audioresample","audio_resample2")
pipe.add(audio_resample2)
audio_convert2.link(audio_resample2)
    # Create a software mixer ("adder" on 1.2.4, "audiomixer" on 1.7.1)
adder = gst.ElementFactory.make("audiomixer","audiomixer")
pipe.add(adder)
    # Request a sink pad on the mixer
    sinkpad1 = adder.get_request_pad("sink_%u")
    # Request another sink pad on the mixer
    sinkpad2 = adder.get_request_pad("sink_%u")
    # ... and connect each resample element's source pad to a request pad
audio_resample1_src=audio_resample1.get_static_pad("src")
audio_resample1_src.link(sinkpad1)
audio_resample2_src = audio_resample2.get_static_pad("src")
audio_resample2_src.link(sinkpad2)
# Add some output
audio_convert_out = gst.ElementFactory.make("audioconvert","audio_convert_out")
pipe.add(audio_convert_out)
output = gst.ElementFactory.make("filesink", "audio_out")
output.set_property('location', "test_out.mp3")
pipe.add(output)
Your output would not be an 'mp3' but raw audio, since you don't re-encode after mixing.
Also, I'd recommend checking the return values of all the link() calls.
And most importantly, you never set the pipeline to playing.
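To make that concrete, here is a hedged sketch of the missing tail, continuing from the code in the question (lamemp3enc is just one possible MP3 encoder, an assumption on my part; any audio encoder would do):

# Re-encode after mixing so the output file really is an MP3.
mp3enc = gst.ElementFactory.make("lamemp3enc", "mp3enc")
pipe.add(mp3enc)

adder.link(audio_convert_out)      # mixer -> converter
audio_convert_out.link(mp3enc)     # converter -> MP3 encoder
mp3enc.link(output)                # encoder -> filesink

# Nothing runs until the pipeline is set to PLAYING.
pipe.set_state(gst.State.PLAYING)

# Block until the mix finishes or fails, then clean up.
bus = pipe.get_bus()
msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
                             gst.MessageType.ERROR | gst.MessageType.EOS)
pipe.set_state(gst.State.NULL)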
Using LibVLC, I'm trying to save a stream while playing it. This is the Python code:
import os
import sys
import vlc
if __name__ == '__main__':
filepath = <either-some-url-or-local-path>
movie = os.path.expanduser(filepath)
if 'http://' not in filepath:
if not os.access(movie, os.R_OK):
print ( 'Error: %s file is not readable' % movie )
sys.exit(1)
instance = vlc.Instance("--sub-source marq --sout=file/ps:example.mpg")
try:
media = instance.media_new(movie)
except NameError:
        print ('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
               vlc.__version__, vlc.libvlc_get_version()))
sys.exit(1)
player = instance.media_player_new()
player.set_media(media)
player.play()
#dont exit!
while(1):
continue
It saves the video stream to a file, example.mpg. As per this doc, the command to save a stream is:
--sout=file/ps:example.mpg
which I pass when creating the vlc.Instance:
instance = vlc.Instance("--sub-source marq --sout=file/ps:example.mpg")
But the problem is that it only saves the stream; it doesn't play the stream simultaneously.
Is there any way (in LibVLC) I can save the stream (to a local file) while playing it?
I'm looking for a solution in Python 3.3.1, but a C or C++ solution is fine as well.
I created a similar, but not duplicate, topic yesterday.
Idea:
The basic idea is simple enough. You have to duplicate the output stream and redirect it to a file. This is done, as Maresh correctly pointed out, using the sout=#duplicate{...} directive.
Working Solution:
The following solution works on my machine ™. I've tested it on Ubuntu 12.10 with VLC v2.0.3 (TwoFlower) and Python 2.7.1. I think it should also work on Python 3, since most of the heavy lifting is done by libVLC anyway.
import os
import sys
import vlc
if __name__ == '__main__':
    filepath = <either-some-url-or-local-path>
movie = os.path.expanduser(filepath)
if 'http://' not in filepath:
if not os.access(movie, os.R_OK):
print ( 'Error: %s file is not readable' % movie )
sys.exit(1)
instance = vlc.Instance("--sout=#duplicate{dst=file{dst=example.mpg},dst=display}")
try:
media = instance.media_new(movie)
except NameError:
        print ('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
               vlc.__version__, vlc.libvlc_get_version()))
sys.exit(1)
player = instance.media_player_new()
player.set_media(media)
player.play()
#dont exit!
while(1):
continue
Helpful Links
The command-line help was essential to decipher the plethora of VLC's command-line options.
Chapter 3 of the VLC streaming HowTo explains the structure of the stream output, its directives, and the various available modules. Chapter 4 shows some examples.
The LibVLC API documentation, in case you want to change media options at runtime.
Update - Saving YouTube videos:
The above code doesn't play nice with YouTube. I searched around and discovered that an additional transcode directive can be used to convert YouTube's video stream to a regular video format. I used #transcode{vcodec=mp4v,acodec=mpga,vb=800,ab=128,deinterlace}
vcodec=mp4v is the video format you want to encode in (mp4v is MPEG-4, mpgv is MPEG-1, and there is also h263, DIV1, DIV2, DIV3, I420, I422, I444, RV24, YUY2).
acodec=mpga is the audio format you want to encode in (mpga is MPEG audio layer 2, a52 is A52 i.e. AC3 sound).
vb=800 is the video bitrate in Kbit/s.
ab=128 is the audio bitrate in Kbit/s.
deinterlace tells VLC to deinterlace the video on the fly.
The updated code looks like this:
import os
import sys
import vlc
if __name__ == '__main__':
#filepath = <either-some-url-or-local-path>
filepath = "http://r1---sn-nfpnnjvh-1gil.c.youtube.com/videoplayback?source=youtube&newshard=yes&fexp=936100%2C906397%2C928201%2C929117%2C929123%2C929121%2C929915%2C929906%2C929907%2C929125%2C929127%2C925714%2C929917%2C929919%2C912512%2C912515%2C912521%2C906838%2C904485%2C906840%2C931913%2C904830%2C919373%2C933701%2C904122%2C932216%2C936303%2C909421%2C912711%2C907228%2C935000&sver=3&expire=1373237257&mt=1373214031&mv=m&ratebypass=yes&id=1907b7271247a714&ms=au&ipbits=48&sparams=cp%2Cid%2Cip%2Cipbits%2Citag%2Cratebypass%2Csource%2Cupn%2Cexpire&itag=45&key=yt1&ip=2a02%3A120b%3Ac3c6%3A7190%3A6823%3Af2d%3A732c%3A3577&upn=z3zzcrvPC0U&cp=U0hWSFJOVV9KUUNONl9KSFlDOmt4Y3dEWFo3dDFu&signature=D6049FD7CD5FBD2CC6CD4D60411EE492AA0E9A77.5D0562CCF4E10A6CC53B62AAFFF6CB3BB0BA91C0"
movie = os.path.expanduser(filepath)
savedcopy = "yt-stream.mpg"
if 'http://' not in filepath:
if not os.access(movie, os.R_OK):
print ( 'Error: %s file is not readable' % movie )
sys.exit(1)
instance = vlc.Instance("--sout=#transcode{vcodec=mp4v,acodec=mpga,vb=800,ab=128,deinterlace}:duplicate{dst=file{dst=%s},dst=display}" % savedcopy)
try:
media = instance.media_new(movie)
except NameError:
        print ('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
               vlc.__version__, vlc.libvlc_get_version()))
sys.exit(1)
player = instance.media_player_new()
player.set_media(media)
player.play()
#dont exit!
while(1):
continue
A couple of important points:
I've used MPEG audio and video codecs in the transcode directive. It seems to be important to use a matching extension for the output file (mpg in this case); otherwise VLC gets confused when opening the saved file for playback. Keep that in mind if you decide to switch to another video format.
You cannot use a regular YouTube URL as the filepath. Instead you have to specify the location of the video stream itself. That's the reason the filepath I've used looks so cryptic; it corresponds to the video at http://www.youtube.com/watch?v=GQe3JxJHpxQ. VLC itself is able to extract the video location from a given YouTube URL, but libVLC doesn't do that out of the box. You'll have to write your own resolver to do that. See this related SO question. I followed this approach to manually resolve the video location for my tests.
I think you need to duplicate the output in order to play and record it at the same time:
vlc.Instance("--sub-source marq --sout=#stream_out_duplicate{dst=display,dst=std{access=file,mux=ts,dst=/path/file.mpg}}")
or
libvlc_media_add_option(media, ":sout=#stream_out_duplicate{dst=display,dst=std{access=file,mux=ts,dst=/path/file.mpg}}")
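In the Python binding, the second form corresponds to Media.add_option; a small sketch (the output path is hypothetical):

media = instance.media_new(movie)
media.add_option(':sout=#stream_out_duplicate{dst=display,dst=std{access=file,mux=ts,dst=/path/file.mpg}}')
player.set_media(media)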
Did you try adding the following option to the list of options?
--sout-display
i.e.:
instance = vlc.Instance("--sub-source marq --sout=file/ps:example.mpg --sout-display")
Some time ago, in a sample on the ActiveState website, I saw someone play and record an MP3 file with VLC using the vlc.py module. You can take a look at its sample code to see how to duplicate a stream. I copied the code here for you (from http://code.activestate.com/recipes/577802-using-vlcpy-to-record-an-mp3-and-save-a-cue-file/):
import vlc
import time
import os
def new_filename(ext = '.mp3'):
"find a free filename in 00000000..99999999"
D = set(x[:8] for x in os.listdir('.')
if (x.endswith(ext) or x.endswith('.cue')) and len(x) == 12)
for i in xrange(10**8):
s = "%08i" %i
if s not in D:
return s
def initialize_cue_file(name,instream,audiofile):
"create a cue file and write some data, then return it"
cueout = '%s.cue' %name
outf = file(cueout,'w')
outf.write('PERFORMER "%s"\n' %instream)
outf.write('TITLE "%s"\n' %name)
outf.write('FILE "%s" WAVE\n' %audiofile)
outf.flush()
return outf
def initialize_player(instream, audiofile):
"initialize a vlc player which plays locally and saves to an mp3file"
inst = vlc.Instance()
p = inst.media_player_new()
cmd1 = "sout=#duplicate{dst=file{dst=%s},dst=display}" %audiofile
cmd2 ="no-sout-rtp-sap"
cmd3 = "no-sout-standard-sap"
cmd4 ="sout-keep"
med=inst.media_new(instream,cmd1,cmd2,cmd3,cmd4)
med.get_mrl()
p.set_media(med)
return p, med
def write_track_meta_to_cuefile(outf,instream,idx,meta,millisecs):
"write the next track info to the cue file"
outf.write(' TRACK %02i AUDIO\n' %idx)
outf.write(' TITLE "%s"\n' %meta)
outf.write(' PERFORMER "%s"\n' %instream)
m = millisecs // 60000
s = (millisecs - (m*60000)) // 1000
hs = (millisecs - (m*60000) - (s*1000)) //10
ts = '%02i:%02i:%02i' %(m,s,hs)
outf.write(' INDEX 01 %s\n' %ts)
outf.flush()
def test():
#some online audio stream for which this currently works ....
instream = 'http://streamer-mtc-aa05.somafm.com:80/stream/1018'
#if the output filename ends with mp3 vlc knows which mux to use
ext = '.mp3'
name = new_filename(ext)
audiofile = '%s%s' %(name,ext)
outf = initialize_cue_file(name,instream,audiofile)
p,med = initialize_player(instream, audiofile)
p.play()
np = None
i = 0
while 1:
time.sleep(.1)
new = med.get_meta(12)
if new != np:
i +=1
t = p.get_time()
print "millisecs: %i" %t
write_track_meta_to_cuefile(outf,instream,i,new,t)
np = new
print "now playing: %s" %np
if __name__=='__main__':
test()
Perhaps you need to clone your output, as suggested on the forum?