Python gevent.kill() Examples

The following are 17 code examples of gevent.kill(), collected from open-source projects. The source file, project, and license are noted above each example.
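Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what gevent.kill() does: it raises an exception, GreenletExit by default, inside the target greenlet, which is the usual way to cancel a background greenlet that is no longer needed. The names worker and glet are purely illustrative.

import gevent


def worker():
    try:
        while True:
            gevent.sleep(1)  # pretend to do periodic background work
    except gevent.GreenletExit:
        # gevent.kill() raises GreenletExit inside the greenlet, so cleanup goes here
        print("worker cancelled, cleaning up")
        raise


glet = gevent.spawn(worker)
gevent.sleep(0.1)   # give the worker a chance to start
gevent.kill(glet)   # schedule GreenletExit to be raised in the worker
glet.join()         # wait until the worker has actually exited
print(glet.dead)    # True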
Example #1
Source File: videocall.py    From janus-cloud with GNU Affero General Public License v3.0
def on_close(self, handle_id):
        self.backend_handle = None #detach with backend handle

        if self._auto_disconnect_greenlet:
            gevent.kill(self._auto_disconnect_greenlet)
            self._auto_disconnect_greenlet = None

        if self.videocall_user and self.videocall_user.incall:
            self.videocall_user.peer_name = ''
            self.videocall_user.incall = False
            self.videocall_user.utime = time.time()
            self._plugin.user_dao.update(self.videocall_user)

            hangup_event_data = {
                'videocall': 'event',
                'result' : {
                    "event" : "hangup",
                    "username" : self.videocall_user.username,
                    "reason" : "backend handle closed"
                }
            }
            self._push_plugin_event(hangup_event_data, None, None) 
Example #2
Source File: generic.py    From sync-engine with GNU Affero General Public License v3.0
def _run(self):
        # Bind greenlet-local logging context.
        self.log = log.new(account_id=self.account_id, folder=self.folder_name,
                           provider=self.provider_name)
        # eagerly signal the sync status
        self.heartbeat_status.publish()

        try:
            self.update_folder_sync_status(lambda s: s.start_sync())
        except IntegrityError:
            # The state insert failed because the folder ID ForeignKey
            # was no longer valid, ie. the folder for this engine was deleted
            # while we were starting up.
            # Exit the sync and let the monitor sort things out.
            log.info("Folder state loading failed due to IntegrityError",
                     folder_id=self.folder_id, account_id=self.account_id)
            raise MailsyncDone()

        # NOTE: The parent ImapSyncMonitor handler could kill us at any
        # time if it receives a shutdown command. The shutdown command is
        # equivalent to ctrl-c.
        while True:
            retry_with_logging(self._run_impl, account_id=self.account_id,
                               provider=self.provider_name, logger=log) 
Example #3
Source File: backend_session.py    From janus-cloud with GNU Affero General Public License v3.0
def destroy(self):
        if self.state == BACKEND_SESSION_STATE_DESTROYED:
            return
        self.state = BACKEND_SESSION_STATE_DESTROYED
        if _sessions.get(self.url) == self:
            _sessions.pop(self.url)

        if self._auto_destroy_greenlet:
            gevent.kill(self._auto_destroy_greenlet)
            self._auto_destroy_greenlet = None

        for handle in self._handles.values():
            handle.on_close()
        self._handles.clear()

        if self._ws_client:
            try:
                self._ws_client.close()
            except Exception:
                pass
            self._ws_client = None 
Example #4
Source File: videocall.py    From janus-cloud with GNU Affero General Public License v3.0
def detach(self):
        if self._has_destroy:
            return
        super().detach()
        if self._auto_disconnect_greenlet:
            gevent.kill(self._auto_disconnect_greenlet)
            self._auto_disconnect_greenlet = None

        if self.videocall_user:
            self._plugin.user_dao.del_by_username(self.videocall_user.username)
            self.videocall_user.handle = None
            self.videocall_user = None

        if self.backend_handle:
            backend_handle = self.backend_handle
            self.backend_handle = None
            backend_handle.detach() 
Example #5
Source File: core.py    From BLESuite with MIT License
def destroy(self):
        if self.connected:
            log.debug("Disconnecting")
            self.role.stack.disconnect()
            self.role = None

        if self.socket_handler is not None:
            log.debug("Connection quit(), killing remaining connection threads")
            gevent.kill(self.socket_handler_thread)
            self.socket_handler = None
            self.socket_handler_thread = None

        if self.event_handler is not None:
            self.event_handler.__del__()
            self.event_handler = None 
Example #6
Source File: sync.py    From easypy with BSD 3-Clause "New" or "Revised" License
def kill_this_process(graceful=False):
    from plumbum import local
    pid = os.getpid()
    if graceful:
        flag = '-HUP'
    else:
        flag = '-9'
    local.cmd.kill(flag, pid) 
Example #7
Source File: sync.py    From easypy with BSD 3-Clause "New" or "Revised" License
def _rimt(exc):
        _logger.info('YELLOW<<killing main thread greenlet>>')
        main_thread_greenlet = threading.main_thread()._greenlet
        orig_throw = main_thread_greenlet.throw

        # we must override "throw" method so exception will be raised with the original traceback
        def throw(*args):
            if len(args) == 1:
                ex = args[0]
                return orig_throw(ex.__class__, ex, ex.__traceback__)
            return orig_throw(*args)
        main_thread_greenlet.throw = throw
        gevent.kill(main_thread_greenlet, exc)
        _logger.debug('exiting the thread that failed')
        raise exc 
Example #8
Source File: sync.py    From easypy with BSD 3-Clause "New" or "Revised" License
def async_raise_in_main_thread(exc, use_concurrent_loop=True):
    """
    Uses a unix signal to raise an exception to be raised in the main thread.
    """

    from plumbum import local
    pid = os.getpid()
    if not REGISTERED_SIGNAL:
        raise NotInitialized()

    # sometimes the signal isn't caught by the main-thread, so we should try a few times (WEKAPP-14543)
    def do_signal(raised_exc):
        global LAST_ERROR
        if LAST_ERROR is not raised_exc:
            _logger.debug("MainThread took the exception - we're done here")
            if use_concurrent_loop:
                raiser.stop()
            return

        _logger.info("Raising %s in main thread", type(LAST_ERROR))
        local.cmd.kill("-%d" % REGISTERED_SIGNAL, pid)

    if use_concurrent_loop:
        from .concurrency import concurrent
        raiser = concurrent(do_signal, raised_exc=exc, loop=True, sleep=30, daemon=True, throw=False)
        raiser.start()
    else:
        do_signal(exc) 
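Example #8 works because CPython executes signal handlers in the main thread: a worker thread stores the exception, signals its own process, and the handler re-raises the exception wherever the main thread happens to be running. The Unix-only sketch below uses hypothetical names and is independent of easypy's REGISTERED_SIGNAL/concurrent machinery; it only shows the bare pattern.

import os
import signal
import threading
import time

_pending_exc = None


def _reraise_pending(signum, frame):
    # Signal handlers always run in the main thread, so raising here
    # raises the stored exception in MainThread.
    raise _pending_exc


# signal.signal() must itself be called from the main thread.
signal.signal(signal.SIGUSR1, _reraise_pending)


def raise_in_main_thread(exc):
    global _pending_exc
    _pending_exc = exc
    os.kill(os.getpid(), signal.SIGUSR1)


def worker():
    time.sleep(0.5)
    raise_in_main_thread(RuntimeError("worker failed"))


threading.Thread(target=worker, daemon=True).start()

try:
    while True:
        time.sleep(0.1)  # the main thread must be executing Python code to receive the signal
except RuntimeError as e:
    print("main thread caught:", e)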
Example #9
Source File: subscription_transport_ws.py    From graphql-python-subscriptions with MIT License
def on_open(self):
        if self.ws.protocol is None or (
                GRAPHQL_SUBSCRIPTIONS not in self.ws.protocol):
            self.ws.close(1002)

        def keep_alive_callback():
            if not self.ws.closed:
                self.send_keep_alive()
            else:
                gevent.kill(keep_alive_timer)

        if self.keep_alive:
            keep_alive_timer = gevent.spawn(self.timer, keep_alive_callback,
                                            self.keep_alive) 
Example #10
Source File: main.py    From powerpool with BSD 2-Clause "Simplified" License
def exit(self, signal=None):
        """ Handle an exit request """
        self.logger.info("{} {}".format(signal, "*" * 80))
        # Kill the top level greenlet
        gevent.kill(gevent.hub.get_hub().parent) 
Example #11
Source File: show_progress.py    From ffmpeg-python with Apache License 2.0
def _watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket filename is yielded from the context manager and the
    socket is closed when the context manager is exited.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value``
            argument. (The example ``show_progress`` below uses tqdm)

    Yields:
        socket_filename: the name of the socket file.
    """
    with _tmpdir_scope() as tmpdir:
        socket_filename = os.path.join(tmpdir, 'sock')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        with contextlib.closing(sock):
            sock.bind(socket_filename)
            sock.listen(1)
            child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)
            try:
                yield socket_filename
            except:
                gevent.kill(child)
                raise 
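For reference, ffmpeg can publish key=value progress lines to a URL passed via its -progress option, and the greenlet spawned above reads those lines from the unix socket and forwards each pair to the handler. A rough usage sketch, assuming the ffmpeg-python bindings and a trivial print handler (neither is part of the quoted file), might look like this:

import ffmpeg  # the ffmpeg-python bindings this example file belongs to


def print_handler(key, value):
    print(key, value)  # e.g. out_time, frame, speed


# Assumes _watch_progress is used as the context manager described in its
# docstring (the @contextlib.contextmanager decorator is omitted in the extract).
with _watch_progress(print_handler) as socket_filename:
    (
        ffmpeg
        .input('in.mp4')    # placeholder input file
        .output('out.mp4')  # placeholder output file
        .global_args('-progress', 'unix://{}'.format(socket_filename))
        .run()
    )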
Example #12
Source File: __init__.py    From redis-hashring with MIT License
def gevent_stop(self):
        """
        Helper method to stop the node for gevent-based applications.
        """
        import gevent
        gevent.kill(self._poller_greenlet)
        self.remove()
        self._select = select.select 
Example #13
Source File: backend_session.py    From janus-cloud with GNU Affero General Public License v3.0
def attach_handle(self, plugin_package_name, opaque_id=None, handle_listener=None):
        """

        :param plugin_package_name:  str plugin package name
        :param opaque_id:   str opaque id
        :param handle_listener: handle-related callback listener, which must not block
        :return: BackendHandle object
        """
        if self.state == BACKEND_SESSION_STATE_DESTROYED:
            raise JanusCloudError('Session has destroy for Janus server: {}'.format(self.url),
                                  JANUS_ERROR_SERVICE_UNAVAILABLE)

        attach_request_msg = create_janus_msg('attach', plugin=plugin_package_name)
        if opaque_id:
            attach_request_msg['opaque_id'] = opaque_id

        response = self.send_request(attach_request_msg)  # would block for IO
        if response['janus'] == 'success':
            handle_id = response['data']['id']
        elif response['janus'] == 'error':
            raise JanusCloudError(
                'attach error for Janus server {} with reason {}'.format(self.url, response['error']['reason']),
                response['error']['code'])
        else:
            raise JanusCloudError(
                'attach error for Janus server: {} with invalid response {}'.format(self.url, response),
                JANUS_ERROR_BAD_GATEWAY)

        # check again when wake up from block IO
        if self.state == BACKEND_SESSION_STATE_DESTROYED:
            raise JanusCloudError('Session has destroy for Janus server: {}'.format(self.url),
                                  JANUS_ERROR_SERVICE_UNAVAILABLE)

        handle = BackendHandle(handle_id, plugin_package_name, self,
                               opaque_id=opaque_id, handle_listener=handle_listener)
        self._handles[handle_id] = handle
        if self._auto_destroy_greenlet:
            gevent.kill(self._auto_destroy_greenlet)
            self._auto_destroy_greenlet = None
        return handle 
Example #14
Source File: weblcds.py    From artisan with GNU General Public License v3.0
def startWeb(p,resourcePath,nonesym,timec,timebg,btc,btbg,etc,etbg,showetflag,showbtflag):
    global port, process, static_path, nonesymbol, timecolor, timebackground, btcolor, btbackground, etcolor, etbackground, showet, showbt
    port = p
    static_path = resourcePath
    nonesymbol = nonesym
    timecolor = timec
    timebackground = timebg
    btcolor = btc
    btbackground = btbg
    etcolor = etc
    etbackground = etbg
    showet = showetflag
    showbt = showbtflag
    if psystem() != 'Windows':
        gsignal(SIGQUIT, kill)

    process = mp.Process(name='WebLCDs',target=work,args=(
        port,
        resourcePath,
        nonesym,
        timec,
        timebg,
        btc,
        btbg,
        etc,
        etbg,
        showetflag,
        showbtflag,))
    process.start()
    
    libtime.sleep(4)
    
    if process.is_alive():    
        # check successful start
        url = "http://127.0.0.1:" + str(port) + "/status"
        r = rget(url,timeout=2)
        
        if r.status_code == 200:
            return True
        else:
            return False
    else:
        return False 
Example #15
Source File: produce_and_consume.py    From Barrage with MIT License
def terminate():
    """Terminate all greenlets.
    """
    gevent.kill(produce_danmaku)
    gevent.kill(consume_danmaku)
    gevent.kill(heartbeat) 
Example #16
Source File: generic.py    From sync-engine with GNU Affero General Public License v3.0
def initial_sync_impl(self, crispin_client):
        # We wrap the block in a try/finally because the change_poller greenlet
        # needs to be killed when this greenlet is interrupted
        change_poller = None
        try:
            assert crispin_client.selected_folder_name == self.folder_name
            remote_uids = crispin_client.all_uids()
            with self.syncmanager_lock:
                with session_scope(self.namespace_id) as db_session:
                    local_uids = common.local_uids(self.account_id, db_session,
                                                   self.folder_id)
                common.remove_deleted_uids(
                    self.account_id, self.folder_id,
                    set(local_uids).difference(remote_uids))

            new_uids = set(remote_uids).difference(local_uids)
            with session_scope(self.namespace_id) as db_session:
                account = db_session.query(Account).get(self.account_id)
                throttled = account.throttled
                self.update_uid_counts(
                    db_session,
                    remote_uid_count=len(remote_uids),
                    # This is the initial size of our download_queue
                    download_uid_count=len(new_uids))

            change_poller = gevent.spawn(self.poll_for_changes)
            bind_context(change_poller, 'changepoller', self.account_id,
                         self.folder_id)
            uids = sorted(new_uids, reverse=True)
            count = 0
            for uid in uids:
                # The speedup from batching appears to be less clear for
                # non-Gmail accounts, so for now just download one-at-a-time.
                self.download_and_commit_uids(crispin_client, [uid])
                self.heartbeat_status.publish()
                count += 1
                if throttled and count >= THROTTLE_COUNT:
                    # Throttled accounts' folders sync at a rate of
                    # 1 message/ minute, after the first approx. THROTTLE_COUNT
                    # messages per folder are synced.
                    # Note this is an approx. limit since we use the #(uids),
                    # not the #(messages).
                    gevent.sleep(THROTTLE_WAIT)
        finally:
            if change_poller is not None:
                # schedule change_poller to die
                gevent.kill(change_poller) 
Example #17
Source File: gmail.py    From sync-engine with GNU Affero General Public License v3.0
def initial_sync_impl(self, crispin_client):
        # We wrap the block in a try/finally because the greenlets like
        # change_poller need to be killed when this greenlet is interrupted
        change_poller = None
        try:
            remote_uids = sorted(crispin_client.all_uids(), key=int)
            with self.syncmanager_lock:
                with session_scope(self.namespace_id) as db_session:
                    local_uids = common.local_uids(self.account_id, db_session,
                                                   self.folder_id)
                common.remove_deleted_uids(
                    self.account_id, self.folder_id,
                    set(local_uids) - set(remote_uids))
                unknown_uids = set(remote_uids) - local_uids
                with session_scope(self.namespace_id) as db_session:
                    self.update_uid_counts(
                        db_session, remote_uid_count=len(remote_uids),
                        download_uid_count=len(unknown_uids))

            change_poller = gevent.spawn(self.poll_for_changes)
            bind_context(change_poller, 'changepoller', self.account_id,
                         self.folder_id)

            if self.is_all_mail(crispin_client):
                # Prioritize UIDs for messages in the inbox folder.
                if len(remote_uids) < 1e6:
                    inbox_uids = set(
                        crispin_client.search_uids(['X-GM-LABELS', 'inbox']))
                else:
                    # The search above is really slow (times out) on really
                    # large mailboxes, so bound the search to messages within
                    # the past month in order to get anywhere.
                    since = datetime.utcnow() - timedelta(days=30)
                    inbox_uids = set(crispin_client.search_uids([
                        'X-GM-LABELS', 'inbox',
                        'SINCE', since]))

                uids_to_download = (sorted(unknown_uids - inbox_uids) +
                                    sorted(unknown_uids & inbox_uids))
            else:
                uids_to_download = sorted(unknown_uids)

            for uids in chunk(reversed(uids_to_download), 1024):
                g_metadata = crispin_client.g_metadata(uids)
                # UIDs might have been expunged since sync started, in which
                # case the g_metadata call above will return nothing.
                # They may also have been preemptively downloaded by thread
                # expansion. We can omit such UIDs.
                uids = [u for u in uids if u in g_metadata and u not in
                        self.saved_uids]
                self.batch_download_uids(crispin_client, uids, g_metadata)
        finally:
            if change_poller is not None:
                # schedule change_poller to die
                gevent.kill(change_poller)