Python redis.exceptions.TimeoutError() Examples

The following are 17 code examples of redis.exceptions.TimeoutError(), taken from open-source projects. Each example lists the source file, project, and license it comes from. You may also want to check out the other available functions and classes of the redis.exceptions module.
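As a starting point, here is a minimal, hypothetical sketch (not taken from any of the projects below) of how redis.exceptions.TimeoutError typically arises: a client created with a socket_timeout raises it when the server does not respond in time. The host, port, and timeout values are assumptions.

import redis
from redis.exceptions import ConnectionError, TimeoutError

client = redis.Redis(host='localhost', port=6379, socket_timeout=0.5)

try:
    client.ping()
except TimeoutError:
    # Raised when Redis does not answer within socket_timeout seconds.
    print('Redis timed out')
except ConnectionError:
    # Raised when the connection cannot be established at all.
    print('Redis is unreachable')
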
Example #1
Source File: nextid.py    From pottery with Apache License 2.0
def _current_id(self, value):
        futures, num_masters_set = set(), 0
        with concurrent.futures.ThreadPoolExecutor() as executor:
            for master in self.masters:
                future = executor.submit(
                    self._set_id_script,
                    keys=(self.key,),
                    args=(value,),
                    client=master,
                )
                futures.add(future)
            for future in concurrent.futures.as_completed(futures):
                with contextlib.suppress(TimeoutError, ConnectionError):
                    num_masters_set += future.result() == value
        if num_masters_set < len(self.masters) // 2 + 1:
            raise QuorumNotAchieved(self.masters, self.key) 
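The check against len(self.masters) // 2 + 1 above is a strict-majority quorum, as in the Redlock algorithm: the write only counts if more than half of the masters accepted it. A small illustrative sketch with made-up numbers:

masters = ['redis-a', 'redis-b', 'redis-c', 'redis-d', 'redis-e']  # hypothetical masters
quorum = len(masters) // 2 + 1    # 5 // 2 + 1 == 3, i.e. a strict majority
num_masters_set = 3               # assume 3 of the 5 masters accepted the write
print(num_masters_set >= quorum)  # True, so no QuorumNotAchieved is raised
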
Example #2
Source File: redlock.py    From pottery with Apache License 2.0
def _acquire_masters(self):
        self._value = os.urandom(self.num_random_bytes)
        self._extension_num = 0
        futures, num_masters_acquired = set(), 0
        with ContextTimer() as timer, \
             concurrent.futures.ThreadPoolExecutor() as executor:
            for master in self.masters:
                futures.add(executor.submit(self._acquire_master, master))
            for future in concurrent.futures.as_completed(futures):
                with contextlib.suppress(TimeoutError, ConnectionError):
                    num_masters_acquired += future.result()
            quorum = num_masters_acquired >= len(self.masters) // 2 + 1
            elapsed = timer.elapsed() - self._drift()
            validity_time = self.auto_release_time - elapsed
        if quorum and max(validity_time, 0):
            return True
        else:
            with contextlib.suppress(ReleaseUnlockedLock):
                self.release()
            return False 
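The validity window computed above follows Redlock's reasoning: the lease is only usable for roughly the auto-release time minus the time acquisition took, adjusted for clock drift. A worked example with made-up numbers:

auto_release_time = 10 * 1000  # assumed lease length in ms
elapsed = 120                  # assumed acquisition time in ms, after the drift adjustment
validity_time = auto_release_time - elapsed
print(validity_time)           # 9880 ms of the lease are still safe to use
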
Example #3
Source File: clients.py    From rb with Apache License 2.0
def execute_command(self, *args, **options):
        pool = self.connection_pool
        command_name = args[0]
        command_args = args[1:]
        router = self.connection_pool.cluster.get_router()
        host_id = router.get_host_for_command(command_name, command_args)
        connection = pool.get_connection(command_name, shard_hint=host_id)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except (ConnectionError, TimeoutError) as e:
            connection.disconnect()
            if not connection.retry_on_timeout and isinstance(e, TimeoutError):
                raise
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            pool.release(connection)

    # Custom Public API 
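The except branch above only resends a timed-out command when the underlying connection was configured with retry_on_timeout; otherwise the TimeoutError propagates. In plain redis-py that flag is passed when building the client; a minimal sketch with assumed connection details:

import redis

# Hypothetical client: allow commands to be retried once after a read timeout
# instead of surfacing redis.exceptions.TimeoutError immediately.
client = redis.Redis(host='localhost', port=6379,
                     socket_timeout=1.0, retry_on_timeout=True)
client.set('greeting', 'hello')
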
Example #4
Source File: cache.py    From orion-server with MIT License
def set(self, key, value, ttl):
        """
        Set the value for a key. Dark writes to the backup in-memory store are always performed
        to synchronize the state of the in-memory store with Redis, so that read failovers do not
        sacrifice the consistency of the underlying data.

        :param key: Raw key.
        :param value: Associated value.
        :param ttl: Time to live, in milliseconds.
        """
        try:
            return self.redis.set(key, value, px=ttl)
        except (ConnectionError, TimeoutError):
            pass
        finally:
            return self.memory.set(key, value, ttl) 
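For context, self.memory above is the backup in-memory store that receives every dark write. A hypothetical, dict-backed stand-in could look like the sketch below (this is illustrative, not orion-server's actual implementation); because set() always falls through to its finally block, a later read that fails over to this store still sees the freshest value.

import time

class MemoryStore:
    """Illustrative stand-in for the backup in-memory store."""

    def __init__(self):
        self._data = {}

    def set(self, key, value, ttl):
        # ttl is in milliseconds, matching the cache's set() signature above.
        self._data[key] = (value, time.time() + ttl / 1000.0)
        return True

    def get(self, key):
        value, expires_at = self._data.get(key, (None, 0.0))
        return value if time.time() < expires_at else None

    def delete(self, key):
        return self._data.pop(key, None) is not None
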
Example #5
Source File: cache.py    From orion-server with MIT License
def delete(self, key):
        """
        Invalidate a cache entry. Like the other write operation set(), dark writes are always
        performed to keep the in-memory cache consistent with Redis in the event of a failover.

        :param key: Raw key.
        """
        try:
            return self.redis.delete(key)
        except (ConnectionError, TimeoutError):
            pass
        finally:
            return self.memory.delete(key) 
Example #6
Source File: redlock.py    From pottery with Apache License 2.0
def extend(self):
        '''Extend our hold on the lock (if we currently hold it).

        Usage:

            >>> printer_lock = Redlock(key='printer')
            >>> printer_lock.acquire()
            True
            >>> 9 * 1000 < printer_lock.locked() < 10 * 1000
            True
            >>> time.sleep(1)
            >>> 8 * 1000 < printer_lock.locked() < 9 * 1000
            True
            >>> printer_lock.extend()
            True
            >>> 9 * 1000 < printer_lock.locked() < 10 * 1000
            True
            >>> printer_lock.release()
        '''
        if self._extension_num >= self.num_extensions:
            raise TooManyExtensions(self.masters, self.key)
        else:
            futures, num_masters_extended = set(), 0
            with concurrent.futures.ThreadPoolExecutor() as executor:
                for master in self.masters:
                    futures.add(executor.submit(self._extend_master, master))
                for future in concurrent.futures.as_completed(futures):
                    with contextlib.suppress(TimeoutError, ConnectionError):
                        num_masters_extended += future.result()
            quorum = num_masters_extended >= len(self.masters) // 2 + 1
            self._extension_num += quorum
            return quorum 
Example #7
Source File: nextid.py    From pottery with Apache License 2.0
def _current_id(self):
        futures, current_id, num_masters_gotten = set(), 0, 0
        with concurrent.futures.ThreadPoolExecutor() as executor:
            for master in self.masters:
                futures.add(executor.submit(master.get, self.key))
            for future in concurrent.futures.as_completed(futures):
                with contextlib.suppress(TimeoutError, ConnectionError):
                    current_id = max(current_id, int(future.result()))
                    num_masters_gotten += 1
        if num_masters_gotten < len(self.masters) // 2 + 1:
            raise QuorumNotAchieved(self.masters, self.key)
        else:
            return current_id 
Example #8
Source File: impl_redis.py    From taskflow with Apache License 2.0
@contextlib.contextmanager
def _translate_failures():
    """Translates common redis exceptions into taskflow exceptions."""
    try:
        yield
    except redis_exceptions.ConnectionError:
        exc.raise_with_cause(exc.JobFailure, "Failed to connect to redis")
    except redis_exceptions.TimeoutError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis, connection"
                             " timed out")
    except redis_exceptions.RedisError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis,"
                             " internal error") 
Example #9
Source File: base.py    From eNMS with GNU General Public License v3.0
def redis(self, operation, *args, **kwargs):
        try:
            return getattr(self.redis_queue, operation)(*args, **kwargs)
        except (ConnectionError, TimeoutError) as exc:
            self.log("error", f"Redis Queue Unreachable ({exc})", change_log=False) 
Example #10
Source File: io.py    From platypush with MIT License
def _data_throttler(self):
        from redis.exceptions import TimeoutError as QueueTimeoutError

        def run():
            redis = self._get_redis()
            last_processed_batch_timestamp = None
            data = {}

            try:
                while True:
                    try:
                        new_data = ast.literal_eval(
                            redis.blpop(self._DATA_THROTTLER_QUEUE)[1].decode('utf-8'))

                        for (key, value) in new_data.items():
                            data.setdefault(key, []).append(value)
                    except QueueTimeoutError:
                        pass

                    if data and (last_processed_batch_timestamp is None or
                                 time.time() - last_processed_batch_timestamp >= self.throttle_seconds):
                        last_processed_batch_timestamp = time.time()
                        self.logger.info('Processing feeds batch for Adafruit IO')

                        for (feed, values) in data.items():
                            if values:
                                value = statistics.mean(values)

                                try:
                                    self.send(feed, value, enqueue=False)
                                except ThrottlingError:
                                    self.logger.warning('Adafruit IO throttling threshold hit, taking a nap ' +
                                                        'before retrying')
                                    time.sleep(self.throttle_seconds)

                        data = {}
            except Exception as e:
                self.logger.exception(e)

        return run 
Example #11
Source File: test_framework.py    From panoptes with Apache License 2.0
def get(self, key):
        if self.timeout:
            raise TimeoutError
        else:
            return super(PanoptesMockRedis, self).get(key) 
Example #12
Source File: cache.py    From orion-server with MIT License
def get(self, key):
        """
        Get the value for a key, prioritizing Redis if available.

        :param key: Raw key.
        :return: Associated value.
        """
        try:
            return self.redis.get(key)
        except (ConnectionError, TimeoutError):
            return self.memory.get(key) 
Example #13
Source File: clients.py    From rb with Apache License 2.0
def join(self, timeout=None):
        """Waits for all outstanding responses to come back or the timeout
        to be hit.
        """
        remaining = timeout

        while self._cb_poll and (remaining is None or remaining > 0):
            now = time.time()
            rv = self._cb_poll.poll(remaining)
            if remaining is not None:
                remaining -= (time.time() - now)

            for command_buffer, event in rv:
                # This command buffer still has pending requests which
                # means we have to send them out first before we can read
                # all the data from it.
                if command_buffer.has_pending_requests:
                    if event == 'close':
                        self._try_reconnect(command_buffer)
                    elif event == 'write':
                        self._send_or_reconnect(command_buffer)

                # The general assumption is that all responses are available
                # or this might block.  On reading we do not use async
                # receiving.  This generally works because latency in the
                # network is low and redis is super quick in sending.  It
                # does not make a lot of sense to complicate things here.
                elif event in ('read', 'close'):
                    try:
                        command_buffer.wait_for_responses(self)
                    finally:
                        self._release_command_buffer(command_buffer)

        if self._cb_poll and timeout is not None:
            raise TimeoutError('Did not receive all data in time.') 
Example #14
Source File: clients.py    From rb with Apache License 2.0
def send_buffer(self):
        """Utility function that sends the buffer into the provided socket.
        The buffer itself will slowly clear out and is modified in place.
        """
        buf = self._send_buf
        sock = self.connection._sock
        try:
            timeout = sock.gettimeout()
            sock.setblocking(False)
            try:
                for idx, item in enumerate(buf):
                    sent = 0
                    while 1:
                        try:
                            sent = sock.send(item)
                        except IOError as e:
                            if e.errno == errno.EAGAIN:
                                continue
                            elif e.errno == errno.EWOULDBLOCK:
                                break
                            raise
                        self.sent_something = True
                        break
                    if sent < len(item):
                        buf[:idx + 1] = [item[sent:]]
                        break
                else:
                    del buf[:]
            finally:
                sock.settimeout(timeout)
        except IOError as e:
            self.connection.disconnect()
            if isinstance(e, socket.timeout):
                raise TimeoutError('Timeout writing to socket (host %s)'
                                   % self.host_id)
            raise ConnectionError('Error while writing to socket (host %s): %s'
                                  % (self.host_id, e)) 
Example #15
Source File: test_framework.py    From panoptes with Apache License 2.0
def test_context_key_value_store_timeout(self):
        panoptes_context = PanoptesContext(self.panoptes_test_conf_file,
                                           key_value_store_class_list=[PanoptesTestKeyValueStore])
        kv = panoptes_context.get_kv_store(PanoptesTestKeyValueStore)
        with self.assertRaises(TimeoutError):
            kv.set(u'test', u'test')
        with self.assertRaises(TimeoutError):
            kv.get(u'test') 
Example #16
Source File: test_framework.py    From panoptes with Apache License 2.0
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
        if self.timeout:
            raise TimeoutError
        else:
            return super(PanoptesMockRedis, self).set(key, value, ex=ex, px=px, nx=nx, xx=xx) 
Example #17
Source File: redlock.py    From pottery with Apache License 2.0
def locked(self):
        '''How much longer we'll hold the lock (unless we extend or release it).

        If we don't currently hold the lock, then this method returns 0.

            >>> printer_lock_1 = Redlock(key='printer')
            >>> printer_lock_1.locked()
            0

            >>> printer_lock_2 = Redlock(key='printer')
            >>> printer_lock_2.acquire()
            True
            >>> printer_lock_1.locked()
            0
            >>> printer_lock_2.release()

        If we do currently hold the lock, then this method returns the current
        lease's Time To Live (TTL) in ms.

            >>> printer_lock_1.acquire()
            True
            >>> 9 * 1000 < printer_lock_1.locked() < 10 * 1000
            True
            >>> printer_lock_1.release()

        '''
        futures, num_masters_acquired, ttls = set(), 0, []
        with ContextTimer() as timer, \
             concurrent.futures.ThreadPoolExecutor() as executor:
            for master in self.masters:
                futures.add(executor.submit(self._acquired_master, master))
            for future in concurrent.futures.as_completed(futures):
                with contextlib.suppress(TimeoutError, ConnectionError):
                    ttl = future.result()
                    num_masters_acquired += ttl > 0
                    ttls.append(ttl)
            quorum = num_masters_acquired >= len(self.masters) // 2 + 1
            if quorum:
                ttls = sorted(ttls, reverse=True)
                validity_time = ttls[len(self.masters) // 2]
                validity_time -= timer.elapsed() + self._drift()
                return max(validity_time, 0)
            else:
                return 0
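The validity calculation above sorts the per-master TTLs in descending order and takes the element at index len(self.masters) // 2, i.e. the longest TTL that is still guaranteed on a strict majority of masters, before subtracting the elapsed time and drift. A small worked example with made-up TTLs:

ttls = sorted([9700, 9500, 0], reverse=True)  # assumed per-master TTLs in ms, 3 masters
validity_time = ttls[len(ttls) // 2]          # 9500: a TTL held on at least 2 of the 3 masters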