Python redis.exceptions.RedisError() Examples
The following are 22 code examples of redis.exceptions.RedisError().
Each example is taken from an open source project; the original project and source file are noted above the snippet. You may also want to check out all available functions/classes of the module redis.exceptions.
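In redis-py, RedisError is the base class of the client's exceptions (ConnectionError, TimeoutError, ResponseError, and the rest derive from it), so catching it is the broadest way to guard a Redis call. A minimal sketch of that pattern, with illustrative connection settings and fallback value:

import redis
from redis.exceptions import RedisError

client = redis.Redis(host='localhost', port=6379, db=0)  # illustrative connection settings

def get_counter(key, default=0):
    """Return an integer counter from Redis, falling back to a default on any client error."""
    try:
        value = client.get(key)
        return int(value) if value is not None else default
    except RedisError:
        # ConnectionError, TimeoutError, ResponseError, etc. all derive from RedisError.
        return default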
Example #1
Source File: redis_db_driver.py From dragonflow with Apache License 2.0
def _scan(self, table, key=None, topic=None):
    (pattern, nodes) = self._query_info(table, topic, key)
    keys = set()
    for node in nodes:
        retry = 0
        while retry < self.RETRY_COUNT:
            LOG.debug('Getting all keys with pattern %s retry %d',
                      pattern, retry)
            try:
                node_keys = self._get_all_keys_from_node(node, pattern)
                keys.update(node_keys)
                break
            except exceptions.RedisError:
                LOG.exception('Error getting keys from node %s:%s',
                              node.ip, node.port)
                retry += 1
                self._cluster.populate_cluster()
        if retry == self.RETRY_COUNT:
            raise df_exceptions.DBKeyNotFound('ALL KEYS')
    return keys
Example #2
Source File: redis_db_driver.py From dragonflow with Apache License 2.0
def _bulk_execute(self, node, keys, command, args=()):
    pipeline = node.client.pipeline(transaction=False)
    retry = 0
    command_pcs = [command, None]
    command_pcs.extend(args)
    while retry < self.RETRY_COUNT:
        for key in keys:
            command_pcs[1] = key
            pipeline.execute_command(*command_pcs)
        try:
            values = pipeline.execute(raise_on_error=False)
            return zip(keys, values)
        except exceptions.RedisError:
            LOG.exception('Error executing pipeline at retry %d', retry)
            retry += 1
    return False
Example #3
Source File: test_db_tester.py From huskar with MIT License
def app():
    app = create_app()
    app.config['PROPAGATE_EXCEPTIONS'] = False

    @app.route('/api/minimal-mode')
    def minimal_mode():
        return unicode(g.auth.is_minimal_mode)

    @app.route('/api/mysql')
    def mysql_error():
        raise SQLAlchemyError()

    @app.route('/api/redis')
    def redis_error():
        raise RedisError()

    return app
Example #4
Source File: client.py From pydisque with MIT License
def __call__(self, fn):
    """Function wrapper."""
    @wraps(fn)
    def wrapped_f(*args, **kwargs):
        c = 0
        while c <= self.retry_count:
            try:
                return fn(*args, **kwargs)
            except RedisError:
                logging.critical("retrying because of this exception - %s", c)
                logging.exception("exception to retry ")
                if c == self.retry_count:
                    raise
            c += 1
    return wrapped_f
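This is the __call__ of a retry decorator class: the wrapped call is repeated whenever it raises RedisError and the exception is re-raised once the retry budget is spent. A self-contained sketch of the same pattern (the class name, retry count, and ping example are illustrative, not pydisque's API):

import logging
from functools import wraps
from redis.exceptions import RedisError

class RetryOnRedisError:
    """Illustrative decorator: retry a call up to `retry_count` extra times on RedisError."""

    def __init__(self, retry_count=3):
        self.retry_count = retry_count

    def __call__(self, fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            attempt = 0
            while attempt <= self.retry_count:
                try:
                    return fn(*args, **kwargs)
                except RedisError:
                    logging.exception("attempt %d failed, retrying", attempt)
                    if attempt == self.retry_count:
                        raise
                attempt += 1
        return wrapped

@RetryOnRedisError(retry_count=2)
def ping(client):
    return client.ping()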
Example #5
Source File: exceptions.py From CTF_AWD_Platform with MIT License
def exception_handler(exc, context):
    """
    Custom exception handler.
    :param exc: the exception raised elsewhere is passed in as exc
    :param context: a dict describing where the exception was raised (i.e. the view that raised it)
    :return: a Response object
    """
    # Call DRF's built-in exception handler first, handing it the exception and its
    # context; serializer errors are handled there and the response is returned directly.
    response = drf_exception_handler(exc, context)
    # If the response is None it was not a serializer error, so handle database
    # and Redis errors here.
    if response is None:
        view = context['view']
        if isinstance(exc, DatabaseError) or isinstance(exc, RedisError):
            # Database error
            logger.error('[%s] %s' % (view, exc))
            response = Response({'message': '服务器内部错误'},
                                status=status.HTTP_507_INSUFFICIENT_STORAGE)
    return response
Example #6
Source File: wrappers.py From textpipe with MIT License
def word_vec(self, word):  # pylint: disable=E0202
    """
    This method is mimicking the word_vec method from the Gensim KeyedVector class.
    Instead of looking it up from an in memory dict, it
    - requests the value from the redis instance, where the key is a combination between
      an optional word vector model key and the word itself
    - decompresses it
    - and finally unpickles it
    :param word: string
    :returns: numpy array of dim of the word vector model (for Google: 300, 1)
    """
    try:
        cache_entry = self._redis.hget(self.key, word)
        if not cache_entry:
            raise KeyError(f'Key {word} does not exist in cache')
        return pickle.loads(cache_entry)
    except RedisError as exception:
        raise RedisKeyedVectorException(f'The connection to Redis failed while trying to '
                                        f'retrieve a word vector. Redis error message: '
                                        f'{exception}')
    except TypeError:
        return None
Example #7
Source File: wrappers.py From textpipe with MIT License
def load_keyed_vectors_into_redis(self, model_path, idf_weighting='naive'):
    """ This function loops over all available words in the loaded word2vec keyed vectors
    model and loads them into the redis instance.
    """
    model = KeyedVectors.load(model_path, mmap='r')
    nr_train_tokens = sum(token_vocab.count for token_vocab in model.vocab.values())
    self.idf_weighting = idf_weighting
    try:
        for word in tqdm(list(model.vocab.keys())):
            if self.idf_weighting == 'naive':
                idf = model.vocab[word].count
            elif self.idf_weighting == 'log':
                idf = np.log(nr_train_tokens / (model.vocab[word].count + 1)) + 1
            else:
                raise ValueError(f'idf_weighting "{self.idf_weighting}" not available; use '
                                 f'"naive" or "log"')
            idf_normalized_vector = model[word] / idf
            self._redis.hset(self.key, word, pickle.dumps(idf_normalized_vector))
    except RedisError as exception:
        raise RedisKeyedVectorException(f'RedisError while trying to load model {model} '
                                        f'into redis: {exception}')
    del model
Example #8
Source File: impl_redis.py From taskflow with Apache License 2.0
def _translate_failures():
    """Translates common redis exceptions into taskflow exceptions."""
    try:
        yield
    except redis_exceptions.ConnectionError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to connect to redis")
    except redis_exceptions.TimeoutError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis, connection"
                             " timed out")
    except redis_exceptions.RedisError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis,"
                             " internal error")
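The bare yield indicates this helper is a generator meant to be wrapped with contextlib.contextmanager (the decorator sits outside the captured snippet) and used as a with-block around Redis calls. A generic sketch of that exception-translating pattern, assuming a stand-in JobFailure class rather than taskflow's own:

import contextlib
from redis import exceptions as redis_exceptions

class JobFailure(Exception):
    """Stand-in for taskflow's JobFailure; only here to make the sketch self-contained."""

@contextlib.contextmanager
def translate_failures():
    """Re-raise redis client errors as a single application-level exception."""
    try:
        yield
    except redis_exceptions.ConnectionError as e:
        raise JobFailure("Failed to connect to redis") from e
    except redis_exceptions.TimeoutError as e:
        raise JobFailure("Failed to communicate with redis, connection timed out") from e
    except redis_exceptions.RedisError as e:
        raise JobFailure("Failed to communicate with redis, internal error") from e

# Usage: any RedisError raised inside the block surfaces as JobFailure.
# with translate_failures():
#     client.set("key", "value")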
Example #9
Source File: __init__.py From redlock-py with MIT License
def lock(self, resource, ttl):
    retry = 0
    val = self.get_unique_id()

    # Add 2 milliseconds to the drift to account for Redis expires
    # precision, which is 1 millisecond, plus 1 millisecond min
    # drift for small TTLs.
    drift = int(ttl * self.clock_drift_factor) + 2

    redis_errors = list()
    while retry < self.retry_count:
        n = 0
        start_time = int(time.time() * 1000)
        del redis_errors[:]
        for server in self.servers:
            try:
                if self.lock_instance(server, resource, val, ttl):
                    n += 1
            except RedisError as e:
                redis_errors.append(e)
        elapsed_time = int(time.time() * 1000) - start_time
        validity = int(ttl - elapsed_time - drift)
        if validity > 0 and n >= self.quorum:
            if redis_errors:
                raise MultipleRedlockException(redis_errors)
            return Lock(validity, resource, val)
        else:
            for server in self.servers:
                try:
                    self.unlock_instance(server, resource, val)
                except:
                    pass
            retry += 1
            time.sleep(self.retry_delay)
    return False
Example #10
Source File: session.py From huskar with MIT License
def load_user(self, username=None):
    username = username or self._name
    if username is None:
        return
    if switch.is_switched_on(SWITCH_ENABLE_MINIMAL_MODE, False):
        self.enter_minimal_mode(MM_REASON_SWITCH)
        return
    try:
        self._user = User.get_by_name(username)
    except (SQLAlchemyError, RedisError, socket.error):
        logger.exception('Enter minimal mode')
        self.enter_minimal_mode(MM_REASON_AUTH)
        session_load_user_failed.send(self)
Example #11
Source File: ext.py From huskar with MIT License
def handle_got_request_exception(self, sender, exception, **extra):
    tester = current_app.extensions[self.STATE_KEY]
    if isinstance(exception, (SQLAlchemyError, RedisError)):
        tester.metrics.on_api_called_sys_exc(*self.DOCTOR_ARGS)
    else:
        tester.metrics.on_api_called_unkwn_exc(*self.DOCTOR_ARGS)
Example #12
Source File: models.py From busy-beaver with MIT License
def get_rq_job(self):
    try:
        rq_job = Job.fetch(self.job_id, rq.connection)
    except (RedisError, NoSuchJobError):
        return None
    return rq_job
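Here RedisError (from the underlying connection) and rq's NoSuchJobError are both treated as "no job available". A hedged sketch of the same lookup against rq's documented Job.fetch API (connection settings and the helper name are illustrative):

import redis
from rq.job import Job
from rq.exceptions import NoSuchJobError
from redis.exceptions import RedisError

conn = redis.Redis()  # connection settings are illustrative

def fetch_job_or_none(job_id):
    """Look up an RQ job by id, treating a missing job or a Redis failure as 'no job'."""
    try:
        return Job.fetch(job_id, connection=conn)
    except (RedisError, NoSuchJobError):
        return None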
Example #13
Source File: wrappers.py From textpipe with MIT License
def __init__(self, uri, key='', max_lru_cache_size=1024, idf_weighting='naive'):
    self.word_vec = lru_cache(maxsize=max_lru_cache_size)(self.word_vec)
    self.key = f'w2v_{key}'
    self.idf_weighting = idf_weighting
    try:
        host, port, database = self._parse_uri(uri)
        self._redis = Redis(host, port, database)
    except RedisError as exception:
        raise RedisKeyedVectorException(f'The connection to Redis failed while trying to '
                                        f'initiate the client. Redis error message: '
                                        f'{exception}')
Example #14
Source File: tasks.py From grimoirelab-kingarthur with GNU General Public License v3.0
def tasks(self):
    """Get the list of tasks

    Retrieve the list of tasks stored in the registry

    :returns: a list of tasks

    :raises TaskRegistryError: raised when the tasks cannot be listed
    """
    self._rwlock.reader_acquire()
    try:
        tasks = []
        keys = []
        match_prefix = "{}*".format(TASK_PREFIX)
        total, found = self.conn.scan(match=match_prefix)
        keys.extend([f.decode("utf-8") for f in found])
        while total != 0:
            total, found = self.conn.scan(cursor=total, match=match_prefix)
            keys.extend([f.decode("utf-8") for f in found])
        keys.sort()
        for k in keys:
            task_dump = self.conn.get(k)
            tasks.append(pickle.loads(task_dump))
        return tasks
    except RedisError as e:
        msg = "Tasks not listed: {}".format(e)
        logger.error(msg)
        raise TaskRegistryError(cause=msg)
    finally:
        self._rwlock.reader_release()
Example #15
Source File: tasks.py From grimoirelab-kingarthur with GNU General Public License v3.0
def update(self, task_id, task):
    """Update a task in the registry.

    Update a task stored in the registry using its task identifier.
    When the task does not exist, a `NotFoundError` exception will be raised.

    :param task_id: task identifier
    :param task: task object

    :returns: a task object

    :raises TaskRegistryError: raised when the task is not updated
    """
    self._rwlock.writer_acquire()
    try:
        task_key = self._task_key(task_id)
        found = self.conn.exists(task_key)
        if not found:
            logger.warning("Task %s not found, adding it", str(task_id))
        self.conn.set(task_key, pickle.dumps(task))
        logger.debug("Task %s updated", str(task_id))
    except RedisError as e:
        msg = "Task {} not updated: {}".format(task_id, e)
        logger.error(msg)
        raise TaskRegistryError(cause=msg)
    finally:
        self._rwlock.writer_release()
Example #16
Source File: tasks.py From grimoirelab-kingarthur with GNU General Public License v3.0
def remove(self, task_id):
    """Remove a task from the registry.

    To remove it, pass its identifier with `task_id` parameter.
    When the identifier is not found, a `NotFoundError` exception is raised.

    :param task_id: identifier of the task to remove

    :raises NotFoundError: raised when the given task identifier is not
        found in the registry
    :raises TaskRegistryError: raised when the given task identifier is not
        removed from the registry
    """
    self._rwlock.writer_acquire()
    try:
        task_key = self._task_key(task_id)
        found = self.conn.exists(task_key)
        if not found:
            raise NotFoundError(element=str(task_id))
        self.conn.delete(task_key)
        logger.debug("Task %s removed from the registry", str(task_id))
    except RedisError as e:
        msg = "Task {} not removed: {}".format(task_id, e)
        logger.error(msg)
        raise TaskRegistryError(cause=msg)
    finally:
        self._rwlock.writer_release()
Example #17
Source File: test_scheduler.py From grimoirelab-kingarthur with GNU General Public License v3.0
def test_ignore_event_on_task_registry_error(self, mock_redis_get):
    """Check if an event is ignored when a TaskRegistryError is thrown"""
    mock_redis_get.side_effect = RedisError

    self.task_scheduler.registry.add('mytask', 'git', 'commit', {})

    handler = CompletedJobHandler(self.task_scheduler)
    result = JobResult(0, 1, 'mytask', 'git', 'commit')
    event = JobEvent(JobEventType.COMPLETED, 0, 'mytask', result)

    handled = handler(event)
    self.assertEqual(handled, False)
Example #18
Source File: test_scheduler.py From grimoirelab-kingarthur with GNU General Public License v3.0
def test_ignore_event_on_update_task_registry_error(self, mock_redis_exists):
    """Check if an event is ignored when a TaskRegistryError is thrown"""
    mock_redis_exists.side_effect = [False, True, RedisError]

    self.task_scheduler.registry.add('mytask', 'git', 'commit', {})

    handler = StartedJobHandler(self.task_scheduler)
    result = JobResult(0, 1, 'mytask', 'git', 'commit')
    event = JobEvent(JobEventType.STARTED, 0, 'mytask', result)

    handled = handler(event)
    self.assertEqual(handled, False)
Example #19
Source File: test_scheduler.py From grimoirelab-kingarthur with GNU General Public License v3.0
def test_ignore_event_on_get_task_registry_error(self, mock_redis_get):
    """Check if an event is ignored when a TaskRegistryError is thrown"""
    mock_redis_get.side_effect = RedisError

    self.task_scheduler.registry.add('mytask', 'git', 'commit', {})

    handler = StartedJobHandler(self.task_scheduler)
    result = JobResult(0, 1, 'mytask', 'git', 'commit')
    event = JobEvent(JobEventType.STARTED, 0, 'mytask', result)

    handled = handler(event)
    self.assertEqual(handled, False)
Example #20
Source File: __init__.py From redlock-py with MIT License
def unlock(self, lock):
    redis_errors = []
    for server in self.servers:
        try:
            self.unlock_instance(server, lock.resource, lock.key)
        except RedisError as e:
            redis_errors.append(e)
    if redis_errors:
        raise MultipleRedlockException(redis_errors)
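Examples #9 and #20 implement the Redlock acquire/release cycle across several independent Redis servers, collecting per-server RedisError instances and surfacing them as a MultipleRedlockException instead of failing on the first error. A hedged usage sketch against redlock-py's documented interface (server addresses and TTL are illustrative):

from redlock import Redlock, MultipleRedlockException

# Three independent Redis instances (addresses are illustrative).
dlm = Redlock([
    {"host": "localhost", "port": 6379, "db": 0},
    {"host": "localhost", "port": 6380, "db": 0},
    {"host": "localhost", "port": 6381, "db": 0},
])

try:
    lock = dlm.lock("my-resource", 1000)  # TTL in milliseconds
    if lock:
        try:
            pass  # do the work that needs mutual exclusion
        finally:
            dlm.unlock(lock)
except MultipleRedlockException as exc:
    # Raised when one or more servers answered with a RedisError.
    print("redlock error:", exc)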
Example #21
Source File: add_transmission_worker.py From trunk-player with MIT License
def handle(self, *args, **options):
    q = RedisQueue('new_trans')
    running = True
    count = 0
    start_time = None
    while running:
        try:
            options['source'] = -1
            options['system'] = -1
            options['web_url'] = '/'
            options['verbose'] = False
            options['m4a'] = False
            options['vhf'] = False
            options['json_name'] = None
            if count == 0:
                start_time = timezone.now()
            item = q.get()
            item_str = item.decode('utf-8')
            for part in item_str.split('|'):
                rec = part.split(':')
                try:
                    options[rec[0]] = rec[1]
                except IndexError:
                    options[rec[0]] = True
            print('Adding json {}'.format(options['json_name']))
            add_new_trans(options)
            if count > 100:
                end_time = timezone.now()
                print('100 in {} to {}'.format(start_time, end_time))
                count = 0
            else:
                count += 1
        except ConnectionError:
            print('Cannot connect to redis is it running?')
            running = False
        except KeyboardInterrupt:
            print('Exiting...')
            running = False
        except RedisError:
            print('Reconnecting...')
            q = RedisQueue('new_trans')
        except Exception as e:
            print('Error')
            print(e)
            if options['exitonerror']:
                raise
            else:
                pass
Example #22
Source File: tasks.py From grimoirelab-kingarthur with GNU General Public License v3.0
def add(self, task_id, backend, category, backend_args,
        archiving_cfg=None, scheduling_cfg=None):
    """Add a task to the registry.

    This method adds task using `task_id` as identifier. If a task
    with the same identifier already exists on the registry,
    a `AlreadyExistsError` exception will be raised.

    :param task_id: identifier of the task to add
    :param backend: backend used to fetch data from the repository
    :param category: category of the items to fetch
    :param backend_args: dictionary of arguments required to run the backend
    :param archiving_cfg: archiving config for the task, if needed
    :param scheduling_cfg: scheduling config for the task, if needed

    :returns: the new task added to the registry

    :raises AlreadyExistsError: raised when the given task identifier
        already exists in the registry
    :raises TaskRegistryError: raised when the given task identifier is not
        added to the registry
    """
    self._rwlock.writer_acquire()
    try:
        task_key = self._task_key(task_id)
        found = self.conn.exists(task_key)
        if found:
            raise AlreadyExistsError(element=str(task_id))
        task = Task(task_id, backend, category, backend_args,
                    archiving_cfg=archiving_cfg, scheduling_cfg=scheduling_cfg)
        self.conn.set(task_key, pickle.dumps(task))
        logger.debug("Task %s added to the registry", str(task_id))
        return task
    except RedisError as e:
        msg = "Task {} not added: {}".format(task_id, e)
        logger.error(msg)
        raise TaskRegistryError(cause=msg)
    finally:
        self._rwlock.writer_release()