Python gi.repository.Gst.parse_launch() Examples

The following are 15 code examples of gi.repository.Gst.parse_launch(), taken from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the gi.repository.Gst module.
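Before the project examples, here is a minimal, self-contained sketch of a typical Gst.parse_launch() call. The pipeline description (videotestsrc ! videoconvert ! autovideosink) is purely illustrative and not taken from any of the projects below:

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

# parse_launch() turns a gst-launch-1.0 style description into a pipeline.
pipeline = Gst.parse_launch('videotestsrc ! videoconvert ! autovideosink')

# Start playback and wait on the bus until an error or end-of-stream arrives.
pipeline.set_state(Gst.State.PLAYING)
bus = pipeline.get_bus()
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                       Gst.MessageType.ERROR | Gst.MessageType.EOS)

# Return the pipeline to NULL to release resources.
pipeline.set_state(Gst.State.NULL)

Most of the examples that follow use the same pattern, but additionally look up named elements with get_by_name() and attach bus watches instead of blocking on the bus.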
Example #1
Source File: gstreamer.py    From project-posenet with Apache License 2.0
def __init__(self, pipeline, inf_callback, render_callback, src_size):
        self.inf_callback = inf_callback
        self.render_callback = render_callback
        self.running = False
        self.gstbuffer = None
        self.output = None
        self.sink_size = None
        self.src_size = src_size
        self.box = None
        self.condition = threading.Condition()

        self.pipeline = Gst.parse_launch(pipeline)
        self.freezer = self.pipeline.get_by_name('freezer')
        self.overlay = self.pipeline.get_by_name('overlay')
        self.overlaysink = self.pipeline.get_by_name('overlaysink')
        appsink = self.pipeline.get_by_name('appsink')
        appsink.connect('new-sample', self.on_new_sample)

        # Set up a pipeline bus watch to catch errors.
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_bus_message)

        # Set up a full screen window on Coral, no-op otherwise.
        self.setup_window() 
Example #2
Source File: player.py    From lplayer with MIT License
def get_player(self):
        player = Gst.parse_launch('uridecodebin name=urisrc !\
 audioconvert ! audioresample ! queue ! removesilence name=removesilence !\
 audioconvert ! audioresample ! queue ! scaletempo !\
 audioconvert ! audioresample ! volume name=volume !\
 audioamplify name=amplification !\
 equalizer-nbands name=equalizer num-bands=18 !\
 autoaudiosink')
        bus = player.get_bus()
        bus.add_signal_watch()
        bus.connect("message::eos", self.on_eos)
        bus.connect('message::state-changed', self.on_state_changed)
        bus.connect('message', self.on_player_message)
        bus.connect("message::error", self.test)
        bus.connect("message::application", self.test)
        bus.connect('sync-message', self.test)
        return player 
Example #3
Source File: gCamera.py    From Cherry-Autonomous-Racecar with MIT License
def gCamera():
    print "gstWebCam"
    bridge = CvBridge()
    video ="video4"
    pub = rospy.Publisher('stream', Image, queue_size=10)
    rospy.init_node('GstWebCam',anonymous=True)
    Gst.init(None)
    pipe = Gst.parse_launch("""v4l2src device=/dev/"""+video+""" ! video/x-raw, width=640, height=480,format=(string)BGR ! appsink sync=false max-buffers=2 drop=true name=sink emit-signals=true""")
    sink = pipe.get_by_name('sink')
    pipe.set_state(Gst.State.PLAYING)
    while not rospy.is_shutdown():
        sample = sink.emit('pull-sample')    
        img = gst_to_opencv(sample.get_buffer())
        try:
            pub.publish(bridge.cv2_to_imgmsg(img, "bgr8"))
        except CvBridgeError as e:
            print(e) 
Example #4
Source File: gstreamer.py    From examples-camera with Apache License 2.0
def __init__(self, pipeline, user_function, src_size):
        self.user_function = user_function
        self.running = False
        self.gstbuffer = None
        self.sink_size = None
        self.src_size = src_size
        self.box = None
        self.condition = threading.Condition()

        self.pipeline = Gst.parse_launch(pipeline)
        self.overlay = self.pipeline.get_by_name('overlay')
        self.overlaysink = self.pipeline.get_by_name('overlaysink')
        appsink = self.pipeline.get_by_name('appsink')
        appsink.connect('new-sample', self.on_new_sample)

        # Set up a pipeline bus watch to catch errors.
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_bus_message)

        # Set up a full screen window on Coral, no-op otherwise.
        self.setup_window() 
Example #5
Source File: inputoutputoverlay.py    From brave with Apache License 2.0
def create_pipeline_from_string(self, pipeline_string):
        try:
            self.logger.debug('Creating with pipeline: ' + pipeline_string)
            self.pipeline = Gst.parse_launch(pipeline_string)
            setup_messaging(pipe=self.pipeline, parent_object=self)
        except GLib.GError as e:
            self.error_message = str(e)
            self.logger.error('Failed to create pipeline [%s]: %s' % (pipeline_string, self.error_message))
            raise brave.exceptions.PipelineFailure(self.error_message) 
Example #6
Source File: audio_graph.py    From Audio-Cutter with GNU General Public License v3.0
def _launch_pipeline(self):
        self.pipeline = Gst.parse_launch("uridecodebin name=decode uri=" +
                                         self._uri + " ! waveformbin name=wave"
                                         " ! fakesink qos=false name=faked")

        Gst.ElementFactory.make("uritranscodebin", None)

        clock = GObject.new(GObject.type_from_name("GstCpuThrottlingClock"))
        clock.props.cpu_usage = 90
        self.pipeline.use_clock(clock)

        faked = self.pipeline.get_by_name("faked")
        faked.props.sync = True
        self._wavebin = self.pipeline.get_by_name("wave")

        self._wavebin.props.uri = self._asset.get_id()
        self._wavebin.props.duration = self._asset.get_duration()
        decode = self.pipeline.get_by_name("decode")
        decode.connect("autoplug-select", self._autoplug_select_cb)
        bus = self.pipeline.get_bus()
        self.pipeline.set_state(Gst.State.PLAYING)
        bus.add_signal_watch()
        self.n_samples = self._asset.get_duration() / SAMPLE_DURATION
        bus.connect("message::error", self.__on_bus_error)
        bus.connect("message::eos", self.__on_bus_eos) 
Example #7
Source File: replay_gain.py    From linux-show-player with GNU General Public License v3.0
def gain(self):
        pipe = 'uridecodebin uri="{0}" ! audioconvert ! rganalysis \
                reference-level={1} ! fakesink'.format(self.uri, self.ref_level)
        self.gain_pipe = Gst.parse_launch(pipe)

        gain_bus = self.gain_pipe.get_bus()
        gain_bus.add_signal_watch()
        gain_bus.connect('message', self._on_message)

        logging.info('REPLAY-GAIN:: started ' + str(self.uri))
        self.gain_pipe.set_state(Gst.State.PLAYING)

        # Block here until EOS
        self.__lock.acquire(False)
        self.__lock.acquire()

        # Reset the pipe
        self.gain_pipe = None

        # Return the computation result
        return self.result 
Example #8
Source File: gstreamer_pipeline.py    From video-analytics-serving with BSD 3-Clause "New" or "Revised" License
def validate_config(config):
        template = config["template"]
        pipeline = Gst.parse_launch(template)
        appsink = pipeline.get_by_name("appsink")
        metaconvert = pipeline.get_by_name("metaconvert")
        metapublish = pipeline.get_by_name("destination")
        if appsink is None:
            logger.warning("Missing appsink element")
        if metaconvert is None:
            logger.warning("Missing metaconvert element")
        if metapublish is None:
            logger.warning("Missing metapublish element") 
Example #9
Source File: player.py    From pomodoro-indicator with GNU General Public License v3.0
def get_player(self):
        player = Gst.parse_launch('uridecodebin name=urisrc !\
 audioconvert ! audioresample ! queue ! removesilence name=removesilence !\
 audioconvert ! audioresample ! queue ! scaletempo !\
 audioconvert ! audioresample ! volume name=volume !\
 equalizer-10bands name=equalizer ! autoaudiosink')
        bus = player.get_bus()
        bus.add_signal_watch()
        bus.connect('message::state-changed', self.on_state_changed)
        bus.connect('message', self.on_player_message)
        return player 
Example #10
Source File: camera_gi.py    From Tickeys-linux with MIT License
def init_camera(self):
        # TODO: This doesn't work when camera resolution is resized at runtime.
        # There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        video_src = self._video_src
        if video_src == 'v4l2src':
            video_src += ' device=/dev/video%d' % self._index
        elif video_src == 'dc1394src':
            video_src += ' camera-number=%d' % self._index

        if Gst.version() < (1, 0, 0, 0):
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            pl = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                  'appsink name=camerasink emit-signals=True caps={}')
        else:
            caps = 'video/x-raw,format=RGB'
            pl = '{} ! decodebin name=decoder ! videoconvert ! appsink ' + \
                 'name=camerasink emit-signals=True caps={}'

        self._pipeline = Gst.parse_launch(pl.format(video_src, caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start() 
Example #11
Source File: camera_gi.py    From Tickeys-linux with MIT License
def init_camera(self):
        # TODO: This doesn't work when camera resolution is resized at runtime.
        # There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        video_src = self._video_src
        if video_src == 'v4l2src':
            video_src += ' device=/dev/video%d' % self._index
        elif video_src == 'dc1394src':
            video_src += ' camera-number=%d' % self._index

        if Gst.version() < (1, 0, 0, 0):
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            pl = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                  'appsink name=camerasink emit-signals=True caps={}')
        else:
            caps = 'video/x-raw,format=RGB'
            pl = '{} ! decodebin name=decoder ! videoconvert ! appsink ' + \
                 'name=camerasink emit-signals=True caps={}'

        self._pipeline = Gst.parse_launch(pl.format(video_src, caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start() 
Example #12
Source File: audiotsmcli_gst.py    From audiotsm with MIT License
def __init__(self, tsm_description="audiotsm-wsola"):
        super().__init__()

        self._speed = 0

        # Create the playbin, that will handle the decoding of the audio files
        self.playbin = Gst.ElementFactory.make('playbin', 'playbin')
        self.add(self.playbin)

        # Create the audiotsm bin, that will handle the TSM
        audiotsmbin = Gst.Bin('audiotsm')

        # Create the elements of the audiotsm bin, add them, and link them
        self.tsm = Gst.parse_launch(tsm_description)
        converter = Gst.ElementFactory.make('audioconvert', 'converter')
        encoder = Gst.ElementFactory.make('wavenc', 'encoder')
        self.sink = Gst.ElementFactory.make('filesink', 'sink')

        audiotsmbin.add(self.tsm)
        audiotsmbin.add(converter)
        audiotsmbin.add(encoder)
        audiotsmbin.add(self.sink)

        self.tsm.link(converter)
        converter.link(encoder)
        encoder.link(self.sink)

        # Add the sink pad of the TSM plugin to the audiotsm bin.
        self.tsm_sink_pad = Gst.GhostPad.new(
            'sink', self.tsm.get_static_pad('sink'))
        audiotsmbin.add_pad(self.tsm_sink_pad)

        # And link it to the playbin
        self.playbin.set_property("audio-sink", audiotsmbin)

        bus = self.get_bus()
        bus.add_signal_watch()
        bus.connect("message::error", self._on_error)
        bus.connect("message::eos", self._on_eos) 
Example #13
Source File: audiotsmgtk.py    From audiotsm with MIT License
def __init__(self, tsm_description="audiotsm-phase-vocoder"):
        super().__init__()

        self._speed = 1.0
        self._speed_set = False

        # Create the playbin, that will handle the decoding of the audio files
        self.playbin = Gst.ElementFactory.make('playbin', 'playbin')
        self.add(self.playbin)

        # Create the audiotsm bin, that will handle the TSM
        audiotsmbin = Gst.Bin('audiotsm')

        # Create the elements of the audiotsm bin, add them, and link them
        self.tsm = Gst.parse_launch(tsm_description)
        converter = Gst.ElementFactory.make('audioconvert', 'converter')
        sink = Gst.ElementFactory.make('autoaudiosink', 'sink')

        audiotsmbin.add(self.tsm)
        audiotsmbin.add(converter)
        audiotsmbin.add(sink)

        self.tsm.link(converter)
        converter.link(sink)

        # Add the sink pad of the TSM plugin to the audiotsm bin.
        self.tsm_sink_pad = Gst.GhostPad.new(
            'sink', self.tsm.get_static_pad('sink'))
        audiotsmbin.add_pad(self.tsm_sink_pad)

        # And link it to the playbin
        self.playbin.set_property("audio-sink", audiotsmbin)

        bus = self.get_bus()
        bus.add_signal_watch()
        bus.connect("message::error", self._on_error)
        bus.connect("message::eos", self._on_eos)
        bus.connect("message::state-changed", self._on_state_changed)

        # The timer that will emit the position-changed signal
        self.position_timer = None 
Example #14
Source File: gstreamer_pipeline.py    From video-analytics-serving with BSD 3-Clause "New" or "Revised" License
def start(self):

        self.request["models"] = self.models
        self._gst_launch_string = string.Formatter().vformat(
            self.template, [], self.request)

        with self._create_delete_lock:
            if self.start_time is not None:
                return

            logger.debug("Starting Pipeline {id}".format(id=self.identifier))
            logger.debug(self._gst_launch_string)

            self.pipeline = Gst.parse_launch(self._gst_launch_string)
            self._set_properties()
            self._set_default_models()
            self._cache_inference_elements()
            sink = self.pipeline.get_by_name("appsink")

            if sink is not None:
                sink.set_property("emit-signals", True)
                sink.set_property('sync', False)
                sink.connect("new-sample", GStreamerPipeline.on_sample, self)
                self.avg_fps = 0

            src = self.pipeline.get_by_name("source")

            if src and sink:
                src_pad = src.get_static_pad("src")
                if (src_pad):
                    src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                      GStreamerPipeline.source_probe_callback, self)
                else:
                    src.connect(
                        "pad-added", GStreamerPipeline.source_pad_added_callback, self)
                sink_pad = sink.get_static_pad("sink")
                sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                                   GStreamerPipeline.appsink_probe_callback, self)

            bus = self.pipeline.get_bus()
            bus.add_signal_watch()
            self._bus_connection_id = bus.connect("message", self.bus_call)
            splitmuxsink = self.pipeline.get_by_name("splitmuxsink")
            self._real_base = None

            if splitmuxsink is not None:
                splitmuxsink.connect("format-location-full",
                                     self.format_location_callback,
                                     None)

            self.pipeline.set_state(Gst.State.PLAYING)
            self.start_time = time.time() 
Example #15
Source File: gstreamer.py    From project-teachable with Apache License 2.0
def run_pipeline(user_function,
                 src_size=(640,480),
                 appsink_size=(320, 180)):
    PIPELINE = 'v4l2src device=/dev/video0 ! {src_caps} ! {leaky_q} '
    if detectCoralDevBoard():
        SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate=30/1'
        PIPELINE += """ ! glupload ! tee name=t
            t. ! {leaky_q} ! glfilterbin filter=glcolorscale
               ! {dl_caps} ! videoconvert ! {sink_caps} ! {sink_element}
            t. ! {leaky_q} ! glfilterbin filter=glcolorscale
               ! rsvgoverlay name=overlay ! waylandsink
        """
    else:
        SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'
        PIPELINE += """ ! tee name=t
            t. ! {leaky_q} ! videoconvert ! videoscale ! {sink_caps} ! {sink_element}
            t. ! {leaky_q} ! videoconvert
               ! rsvgoverlay name=overlay ! videoconvert ! autovideosink
            """

    SINK_ELEMENT = 'appsink name=appsink sync=false emit-signals=true max-buffers=1 drop=true'
    DL_CAPS = 'video/x-raw,format=RGBA,width={width},height={height}'
    SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
    LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

    src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
    dl_caps = DL_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    sink_caps = SINK_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    pipeline = PIPELINE.format(leaky_q=LEAKY_Q,
        src_caps=src_caps, dl_caps=dl_caps, sink_caps=sink_caps,
        sink_element=SINK_ELEMENT)

    print('Gstreamer pipeline: ', pipeline)
    pipeline = Gst.parse_launch(pipeline)

    overlay = pipeline.get_by_name('overlay')
    appsink = pipeline.get_by_name('appsink')
    appsink.connect('new-sample', partial(on_new_sample,
        overlay=overlay, screen_size = src_size,
        appsink_size=appsink_size, user_function=user_function))
    loop = GObject.MainLoop()

    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)

    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass

    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass