Python redis.WatchError() Examples
The following are 13 code examples of redis.WatchError(), drawn from open-source projects. The original project and source file are noted above each example.
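In redis-py, WatchError is raised by Pipeline.execute() when a key registered with WATCH is modified by another client between the WATCH and the EXEC, which is why nearly every example below wraps the transaction in a retry loop. Here is a minimal sketch of that optimistic-locking pattern; the key name and the increment operation are illustrative only and are not taken from any of the projects below:

import redis

r = redis.Redis()

def increment_atomically(key):
    with r.pipeline() as pipe:
        while True:
            try:
                # WATCH switches the pipeline to immediate mode and registers the key
                pipe.watch(key)
                current = int(pipe.get(key) or 0)
                # MULTI switches back to buffered (transactional) mode
                pipe.multi()
                pipe.set(key, current + 1)
                # EXEC; raises redis.WatchError if the key changed since WATCH
                return pipe.execute()
            except redis.WatchError:
                # another client touched the key -- retry the whole read/modify/write
                continue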
Example #1
Source File: dist.py From evolution-strategies-starter with MIT License | 6 votes |
def get_current_task(self):
    with self.local_redis.pipeline() as pipe:
        while True:
            try:
                pipe.watch(TASK_ID_KEY)
                task_id = int(retry_get(pipe, TASK_ID_KEY))
                if task_id == self.cached_task_id:
                    logger.debug('[worker] Returning cached task {}'.format(task_id))
                    break
                pipe.multi()
                pipe.get(TASK_DATA_KEY)
                logger.info('[worker] Getting new task {}. Cached task was {}'.format(task_id, self.cached_task_id))
                self.cached_task_id, self.cached_task_data = task_id, deserialize(pipe.execute()[0])
                break
            except redis.WatchError:
                continue
    return self.cached_task_id, self.cached_task_data
Example #2
Source File: dist.py From learning2run with MIT License | 6 votes |
def get_current_task(self):
    with self.local_redis.pipeline() as pipe:
        while True:
            try:
                pipe.watch(TASK_ID_KEY)
                task_id = int(retry_get(pipe, TASK_ID_KEY))
                if task_id == self.cached_task_id:
                    logger.debug('[worker] Returning cached task {}'.format(task_id))
                    break
                pipe.multi()
                pipe.get(TASK_DATA_KEY)
                logger.info('[worker] Getting new task {}. Cached task was {}'.format(task_id, self.cached_task_id))
                self.cached_task_id, self.cached_task_data = task_id, deserialize(pipe.execute()[0])
                break
            except redis.WatchError:
                continue
    return self.cached_task_id, self.cached_task_data
Example #3
Source File: redis_lock.py From tasktiger with MIT License | 6 votes |
def do_renew(self, new_timeout):
    pipe = self.redis.pipeline()
    pipe.watch(self.name)
    lock_value = pipe.get(self.name)
    if lock_value != self.local.token:
        raise LockError("Cannot extend a lock that's no longer owned")
    pipe.multi()
    pipe.pexpire(self.name, int(new_timeout * 1000))

    try:
        response = pipe.execute()
    except WatchError:
        # someone else acquired the lock
        raise LockError("Cannot extend a lock that's no longer owned")

    if not response[0]:
        # pexpire returns False if the key doesn't exist
        raise LockError("Cannot extend a lock that's no longer owned")

    return True
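The token comparison in do_renew only makes sense if the lock was acquired by storing a per-owner token in the key; that acquire step is not shown on this page. A minimal sketch of what it might look like (the function name and signature are assumptions, not the tasktiger implementation):

import uuid
import redis

r = redis.Redis()

def acquire_lock(name, timeout_ms):
    # a random token identifies the owner; renew code like do_renew above later
    # compares the stored value against this token under WATCH before extending the TTL
    token = uuid.uuid4().hex.encode()
    # SET name token NX PX timeout_ms: only succeeds if the key does not exist yet
    if r.set(name, token, nx=True, px=timeout_ms):
        return token
    return None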
Example #4
Source File: helper.py From nidaba with GNU General Public License v2.0 | 6 votes |
def _redis_set_atomically(batch_id, subtask, key, val):
    """
    Atomically sets a field in the Redis batch object to a value.
    """
    with Redis.pipeline() as pipe:
        while 1:
            try:
                pipe.watch(batch_id)
                batch_struct = json.loads(pipe.get(batch_id))
                pipe.multi()
                batch_struct[subtask][key] = val
                pipe.set(batch_id, json.dumps(batch_struct))
                pipe.execute()
                break
            except WatchError:
                continue
Example #5
Source File: scheduler.py From rpaas with BSD 3-Clause "New" or "Revised" License | 6 votes |
def try_lock(self):
    interval_delta = datetime.timedelta(seconds=self.interval)
    with self.conn.pipeline() as pipe:
        try:
            now = datetime.datetime.utcnow()
            pipe.watch(self.last_run_key)
            last_run = pipe.get(self.last_run_key)
            if last_run:
                last_run_date = datetime.datetime.strptime(last_run, DATETIME_FORMAT)
                if now - last_run_date < interval_delta:
                    pipe.unwatch()
                    return False
            pipe.multi()
            pipe.set(self.last_run_key, now.strftime(DATETIME_FORMAT))
            pipe.execute()
            return True
        except redis.WatchError:
            return False
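Because last_run_key is WATCHed, two scheduler processes racing on the same tick cannot both succeed: the loser either sees a fresh last_run and unwatches, or hits WatchError at execute() and returns False. A hypothetical driver loop for such a method (run_once and the polling cadence are assumptions, not taken from the rpaas source) might look like:

import time

def run_scheduler(scheduler):
    while True:
        # only the process that wins the WATCHed check-and-set does the work this tick
        if scheduler.try_lock():
            scheduler.run_once()  # hypothetical unit of scheduled work
        time.sleep(scheduler.interval)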
Example #6
Source File: queue_green_oa_scrape.py From oadoi with MIT License | 5 votes |
def begin_rate_limit(page, interval_seconds=None):
    if page.endpoint_id == publisher_equivalent_endpoint_id:
        return True

    interval_seconds = interval_seconds or scrape_interval_seconds(page)

    r = redis.from_url(os.environ.get("REDIS_URL"))
    started_key = redis_key(page, 'started')
    finished_key = redis_key(page, 'finished')

    with r.pipeline() as pipe:
        try:
            pipe.watch(started_key)
            pipe.watch(finished_key)

            scrape_started = unpickle(r.get(started_key))
            scrape_finished = unpickle(r.get(finished_key))

            if (scrape_started and scrape_started >= datetime.utcnow() - timedelta(hours=1)) or (
                scrape_finished and scrape_finished >= datetime.utcnow() - timedelta(seconds=interval_seconds)
            ):
                return False

            pipe.multi()
            pipe.set(started_key, pickle.dumps(datetime.utcnow()))
            pipe.set(finished_key, pickle.dumps(None))
            pipe.execute()
            return True
        except WatchError:
            return False
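begin_rate_limit records a started timestamp and clears the finished one; the counterpart that marks a scrape as done is not shown on this page. A minimal sketch of what such a counterpart could look like (the name end_rate_limit is an assumption, not the oadoi implementation) would simply overwrite finished_key so later begin_rate_limit calls honor the per-endpoint interval:

from datetime import datetime
import os
import pickle

import redis

def end_rate_limit(page):
    # hypothetical counterpart to begin_rate_limit above; redis_key is the same
    # helper used in that example
    r = redis.from_url(os.environ.get("REDIS_URL"))
    r.set(redis_key(page, 'finished'), pickle.dumps(datetime.utcnow()))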
Example #7
Source File: treeitem.py From zing with GNU General Public License v3.0 | 5 votes |
def save_enqueued(self, pipe):
    """
    Preparing job to enqueue. Works via pipeline.

    Nothing done if WatchError happens while next `pipeline.execute()`.
    """
    job = self.create_job(status=JobStatus.QUEUED)
    self.set_job_params(pipeline=pipe)
    job.origin = self.origin
    job.enqueued_at = utcnow()
    if job.timeout is None:
        job.timeout = self.timeout
    job.save(pipeline=pipe)
    self.job = job
Example #8
Source File: treeitem.py From zing with GNU General Public License v3.0 | 5 votes |
def save_deferred(self, depends_on, pipe):
    """
    Preparing job to defer (add as dependent). Works via pipeline.

    Nothing done if WatchError happens while next `pipeline.execute()`.
    """
    job = self.create_job(depends_on=depends_on, status=JobStatus.DEFERRED)
    self.set_job_params(pipeline=pipe)
    job.register_dependency(pipeline=pipe)
    job.save(pipeline=pipe)
    return job
Example #9
Source File: nidaba.py From nidaba with GNU General Public License v2.0 | 5 votes |
def add_document(self, doc):
    """Add a document to the batch.

    Adds a document tuple to the batch and checks if it exists.

    Args:
        doc (tuple): A standard document tuple.

    Raises:
        NidabaInputException: The document tuple does not refer to a file.
    """
    if self.lock:
        raise NidabaInputException('Executed batch may not be modified')
    if not self.storage.is_file(*doc):
        raise NidabaInputException('Input document is not a file.')
    with self.redis.pipeline() as pipe:
        while(1):
            try:
                pipe.watch(self.id)
                self._restore_and_create_scratchpad(pipe)
                self.docs.append(doc)
                self.scratchpad['scratchpad']['docs'] = self.docs
                pipe.set(self.id, json.dumps(self.scratchpad))
                pipe.execute()
                break
            except WatchError:
                continue
Example #10
Source File: nidaba.py From nidaba with GNU General Public License v2.0 | 5 votes |
def rm_document(self, doc):
    """Removes a document from the (unexecuted) batch.

    Removes a document tuple from the batch.

    Args:
        doc (tuple): A standard document tuple.

    Raises:
        NidabaInputException: The document tuple does not refer to a file.
    """
    if self.lock:
        raise NidabaInputException('Executed batch may not be modified')
    with self.redis.pipeline() as pipe:
        while(1):
            try:
                pipe.watch(self.id)
                self._restore_and_create_scratchpad(pipe)
                self.docs.remove(list(doc))
                self.scratchpad['scratchpad']['docs'] = self.docs
                pipe.set(self.id, json.dumps(self.scratchpad))
                pipe.execute()
                break
            except WatchError:
                continue
            except ValueError:
                raise NidabaInputException('Document not part of the batch')
Example #11
Source File: rate_limiter.py From zulip with Apache License 2.0 | 4 votes |
def incr_ratelimit(cls, entity_key: str, max_api_calls: int, max_api_window: int) -> None:
    """Increases the rate-limit for the specified entity"""
    list_key, set_key, _ = cls.get_keys(entity_key)
    now = time.time()

    # Start redis transaction
    with client.pipeline() as pipe:
        count = 0
        while True:
            try:
                # To avoid a race condition between getting the element we might trim from our list
                # and removing it from our associated set, we abort this whole transaction if
                # another agent manages to change our list out from under us
                # When watching a value, the pipeline is set to Immediate mode
                pipe.watch(list_key)

                # Get the last elem that we'll trim (so we can remove it from our sorted set)
                last_val = pipe.lindex(list_key, max_api_calls - 1)

                # Restart buffered execution
                pipe.multi()

                # Add this timestamp to our list
                pipe.lpush(list_key, now)

                # Trim our list to the oldest rule we have
                pipe.ltrim(list_key, 0, max_api_calls - 1)

                # Add our new value to the sorted set that we keep
                # We need to put the score and val both as timestamp,
                # as we sort by score but remove by value
                pipe.zadd(set_key, {str(now): now})

                # Remove the trimmed value from our sorted set, if there was one
                if last_val is not None:
                    pipe.zrem(set_key, last_val)

                # Set the TTL for our keys as well
                api_window = max_api_window
                pipe.expire(list_key, api_window)
                pipe.expire(set_key, api_window)

                pipe.execute()

                # If no exception was raised in the execution, there were no transaction conflicts
                break
            except redis.WatchError:  # nocoverage # Ideally we'd have a test for this.
                if count > 10:
                    raise RateLimiterLockingException()
                count += 1
                continue
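incr_ratelimit only records a call; deciding whether a client is over its limit has to come from a separate check against the same data structures. A hedged sketch of such a check using the sorted-set layout above (an illustration only, not the Zulip implementation):

import time
import redis

client = redis.Redis()  # stands in for the module-level client used in the example above

def is_ratelimited(set_key, max_api_calls, max_api_window):
    now = time.time()
    # scores in the sorted set are the call timestamps recorded by incr_ratelimit,
    # so counting scores inside the window gives the number of recent calls
    recent_calls = client.zcount(set_key, now - max_api_window, now)
    return recent_calls >= max_api_calls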
Example #12
Source File: nidaba.py From nidaba with GNU General Public License v2.0 | 4 votes |
def __init__(self, id=None):
    # stuff depending on a valid configuration
    from nidaba import storage
    from nidaba import config
    self.storage = storage
    # slowly importing stuff
    from nidaba import tasks
    from nidaba import plugins
    from nidaba import celery
    self.task_reg = tasks
    self.celery = celery

    self.id = id
    if self.id is None:
        self.id = uuid.uuid4().get_hex()
        self.storage.prepare_filestore(self.id)
    if not self.storage.is_valid_job(self.id):
        raise NidabaInputException('Storage not prepared for task')
    self.docs = []
    self.scratchpad = {}
    self.redis = config.Redis
    self.tasks = OrderedDict([('img', []),
                              ('binarize', []),
                              ('segmentation', []),
                              ('ocr', []),
                              ('stats', []),
                              ('postprocessing', []),
                              ('output', []),
                              ('archive', [])])
    # defines if tasks in a group are run in parallel or in sequence and their merge mode
    self.order = {'img': ('sequence', False),
                  'binarize': ('parallel', False),
                  'segmentation': ('parallel', False),
                  'ocr': ('parallel', False),
                  'stats': ('parallel', False),
                  'postprocessing': ('sequence', 'doc'),
                  'output': ('sequence', False),
                  'archive': ('parallel', True)}
    self.lock = False
    with self.redis.pipeline() as pipe:
        while(1):
            try:
                pipe.watch(self.id)
                self._restore_and_create_scratchpad(pipe)
                if 'scratchpad' not in self.scratchpad:
                    self.lock = True
                pipe.execute()
                break
            except WatchError:
                continue
Example #13
Source File: nidaba.py From nidaba with GNU General Public License v2.0 | 4 votes |
def add_task(self, group, method, **kwargs):
    """Add a task.

    Adds a ``task``, a single executable task gathering one or more input
    documents and returning a single output document, to the current tick.
    Multiple jobs are run in parallel.

    Args:
        group (unicode): A task group identifier
        method (unicode): A task identifier
        **kwargs: Arguments to the task

    Raises:
        NidabaInputException: Trying to modify executed task.
        NidabaNoSuchAlgorithmException: Invalid method given.
    """
    if self.lock:
        raise NidabaInputException('Executed batch may not be modified')
    # validate that the task exists
    if group not in self.tasks:
        raise NidabaNoSuchAlgorithmException('Unknown task group {}'.format(group))
    if u'nidaba.{}.{}'.format(group, method) not in self.celery.app.tasks:
        raise NidabaNoSuchAlgorithmException('Unknown task {} {}'.format(group, method))
    task = self.celery.app.tasks[u'nidaba.{}.{}'.format(group, method)]
    # validate arguments first against getcallargs
    try:
        getcallargs(task.run, ('', ''), **kwargs)
    except TypeError as e:
        raise NidabaInputException(str(e))
    # validate against arg_values field of the task
    task_arg_validator(task.get_valid_args(), **kwargs)
    with self.redis.pipeline() as pipe:
        while(1):
            try:
                pipe.watch(self.id)
                self._restore_and_create_scratchpad(pipe)
                self.tasks[group].append((method, kwargs))
                self.scratchpad['scratchpad']['simple_tasks'] = self.tasks
                pipe.set(self.id, json.dumps(self.scratchpad))
                pipe.execute()
                break
            except WatchError:
                continue