Python django.core.cache.cache.add() Examples
The following are 29 code examples of django.core.cache.cache.add(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module django.core.cache.cache, or try the search function.
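
Before the examples, a minimal sketch of the add() semantics they all lean on (this assumes any configured Django cache backend; the key names here are illustrative): add() stores a value only if the key is absent and returns whether it wrote, which is why several projects below use it as an atomic lock primitive.

from django.core.cache import cache

# add() only writes when the key is missing, and reports whether it wrote.
won = cache.add('example-lock', 'true', 30)    # True: key was absent, now set for 30s
lost = cache.add('example-lock', 'other', 30)  # False: key exists, value unchanged
assert cache.get('example-lock') == 'true'
cache.delete('example-lock')                   # release, as the lock examples below do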
Example #1
Source File: mock.py From controller with MIT License | 6 votes |
def add_cleanup_pod(url):
    """populate the cleanup pod list"""
    # variance allows a pod to stay alive past grace period
    variance = random.uniform(0.1, 1.5)
    grace = round(settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS * variance)

    # save
    pods = cache.get('cleanup_pods', {})
    pods[url] = (datetime.utcnow() + timedelta(seconds=grace))
    cache.set('cleanup_pods', pods)

    # add grace period timestamp
    pod = cache.get(url)
    grace = settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS
    pd = datetime.utcnow() + timedelta(seconds=grace)
    timestamp = str(pd.strftime(MockSchedulerClient.DATETIME_FORMAT))
    pod['metadata']['deletionTimestamp'] = timestamp
    cache.set(url, pod)
Example #2
Source File: tasks.py From astrobin with GNU Affero General Public License v3.0 | 6 votes |
def update_top100_ids():
    from astrobin.models import Image

    LOCK_EXPIRE = 60 * 5  # Lock expires in 5 minutes
    lock_id = 'top100_ids_lock'

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)

    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete(lock_id)

    logger.debug('Building Top100 ids...')
    if acquire_lock():
        try:
            sqs = SearchQuerySet().models(Image).order_by('-likes')
            top100_ids = [int(x.pk) for x in sqs][:100]
            cache.set('top100_ids', top100_ids, 60 * 60 * 24)
        finally:
            release_lock()
        return

    logger.debug('Top100 ids task is already being run by another worker')
Example #3
Source File: test_tasks.py From mitoc-trips with GNU General Public License v3.0 | 6 votes |
def test_discount_tasks_share_same_key(
    update_participant, update_discount_sheet, mock_cache
):
    """All tasks modifying the same discount sheet must share a task ID.

    This prevents multiple tasks modifying the Google Sheet at the same time.
    """
    discount = factories.DiscountFactory.create(pk=8675)
    participant = factories.ParticipantFactory.create()
    expected_lock_id = 'update_discount-8675'

    tasks.update_discount_sheet_for_participant(discount.pk, participant.pk)
    mock_cache.add.assert_called_with(expected_lock_id, 'true', 600)

    tasks.update_discount_sheet(discount.pk)
    mock_cache.add.assert_called_with(expected_lock_id, 'true', 600)
Example #4
Source File: tasks.py From mitoc-trips with GNU General Public License v3.0 | 6 votes |
def update_discount_sheet_for_participant(discount_id, participant_id):
    """Lock the sheet and add/update a single participant.

    This task should not run at the same time that we're updating the sheet
    for another participant (or for all participants, as we do nightly).
    """
    discount = models.Discount.objects.get(pk=discount_id)
    participant = models.Participant.objects.get(pk=participant_id)
    if settings.DISABLE_GSHEETS:
        logger.warning(
            "Google Sheets functionality is disabled, not updating '%s' for %s",
            discount.name,
            participant.name,
        )
        return
    member_sheets.update_participant(discount, participant)
Example #5
Source File: tasks.py From mitoc-trips with GNU General Public License v3.0 | 6 votes |
@contextmanager  # assumed from the yield/try/finally shape (contextlib.contextmanager)
def exclusive_lock(task_identifier):
    """Obtain an exclusive lock, using the task_identifier as a unique ID.

    This helps prevent the case of multiple workers executing the same task
    at the same time, which can cause unexpected side effects.
    """
    # See: https://celery.readthedocs.io/en/latest/tutorials/task-cookbook.html

    # Plan to timeout a few seconds before the limit
    # (After `LOCK_EXPIRE` seconds have passed, the cache will self-clean)
    timeout_at = monotonic() + LOCK_EXPIRE - 3

    # Try to add the value to the cache.
    # Returns False if already cached (another worker added it already)
    # Returns True otherwise (this worker is the first to add the key)
    got_lock = cache.add(task_identifier, 'true', LOCK_EXPIRE)

    # Yield our ability to obtain a lock, but always cleanup
    try:
        yield got_lock
    finally:
        # If `got_lock` was False, we don't own the lock - never clean up
        # If we're close to the timeout, just let the cache self-clean
        if got_lock and monotonic() < timeout_at:
            cache.delete(task_identifier)
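
A hypothetical caller of this context manager could look like the sketch below; the task name, the @shared_task decorator, and the do_sync() helper are illustrative, not from the project:

# Illustrative only: the body runs only if this worker won the cache.add() race.
@shared_task  # hypothetical Celery task
def nightly_sync():
    with exclusive_lock('nightly_sync') as got_lock:
        if not got_lock:
            return  # another worker holds the lock
        do_sync()  # hypothetical helper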
Example #6
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_forever_timeout(self):
    """
    Passing in None into timeout results in a value that is cached forever
    """
    cache.set("key1", "eggs", None)
    assert cache.get("key1") == "eggs"

    cache.add("key2", "ham", None)
    assert cache.get("key2") == "ham"

    added = cache.add("key1", "new eggs", None)
    assert not added
    assert cache.get("key1") == "eggs"

    cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None)
    assert cache.get("key3") == "sausage"
    assert cache.get("key4") == "lobster bisque"
Example #7
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_long_timeout(self):
    """
    Using a timeout greater than 30 days makes memcached think it is an
    absolute expiration timestamp instead of a relative offset. Test that
    we honour this convention. Refs #12399.
    """
    cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
    assert cache.get("key1") == "eggs"

    cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1)
    assert cache.get("key2") == "ham"

    cache.set_many(
        {"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1
    )
    assert cache.get("key3") == "sausage"
    assert cache.get("key4") == "lobster bisque"
Example #8
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_binary_string(self):
    # Binary strings should be cacheable
    from zlib import compress, decompress

    value = "value_to_be_compressed"
    compressed_value = compress(value.encode())

    # Test set
    cache.set("binary1", compressed_value)
    compressed_result = cache.get("binary1")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()

    # Test add
    cache.add("binary1-add", compressed_value)
    compressed_result = cache.get("binary1-add")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()

    # Test set_many
    cache.set_many({"binary1-set_many": compressed_value})
    compressed_result = cache.get("binary1-set_many")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()
Example #9
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_unicode(self):
    # Unicode values can be cached
    stuff = {
        "ascii": "ascii_value",
        "unicode_ascii": "Iñtërnâtiônàlizætiøn1",
        "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
        "ascii2": {"x": 1},
    }

    # Test `set`
    for (key, value) in stuff.items():
        cache.set(key, value)
        assert cache.get(key) == value

    # Test `add`
    for (key, value) in stuff.items():
        cache.delete(key)
        cache.add(key, value)
        assert cache.get(key) == value

    # Test `set_many`
    for key, _value in stuff.items():
        cache.delete(key)
    cache.set_many(stuff)
    for (key, value) in stuff.items():
        assert cache.get(key) == value
Example #10
Source File: log.py From GloboNetworkAPI with Apache License 2.0 | 6 votes |
def get_lock():
    """Acquire a lock to keep messages from being written over each other in the log"""
    try:
        from django.core.cache import cache
        cache.default_timeout = 0
        if cache._cache and hasattr(cache._cache, 'get_stats'):
            stats = cache._cache.get_stats()
        else:
            stats = []
        if stats:
            while cache.add('logger_lock', 1, 1) == 0:
                time.sleep(0.1)
    except ImportError:
        dump_file = open('/tmp/networkapi_log_error_dump', 'a')
        traceback.print_exc(file=dump_file)
        dump_file.close()
Example #11
Source File: tasks.py From MetaCI with BSD 3-Clause "New" or "Revised" License | 5 votes |
def lock_org(org, build_id, timeout):
    return cache.add(org.lock_id, f"build-{build_id}", timeout=timeout)
Example #12
Source File: models.py From MetaCI with BSD 3-Clause "New" or "Revised" License | 5 votes |
def lock(self):
    if not self.scratch:
        cache.add(self.lock_id, "manually locked", timeout=None)
Example #13
Source File: models.py From MetaCI with BSD 3-Clause "New" or "Revised" License | 5 votes |
def for_user(self, user, perms=None):
    if user.is_superuser:
        return self
    if perms is None:
        perms = "plan.org_login"
    PlanRepository = apps.get_model("plan.PlanRepository")
    planrepos = PlanRepository.objects.for_user(user, perms)
    planrepos = planrepos.values("plan__org", "repo")
    q = models.Q()
    for plan_org in planrepos:
        q.add(
            models.Q(name=plan_org["plan__org"], repo_id=plan_org["repo"]),
            models.Q.OR,
        )
    return self.filter(q)
Example #14
Source File: models.py From digihel with MIT License | 5 votes |
def _linked_events(self):
    events = cache.get('linkedevents')
    if events:
        return events
    # the methods are assumed to return events latest first
    url = 'https://{}event/{}&include=location&sort=-end_time&page_size=100'.format(
        self.urls[self.data_source], self.linkedevents_params)
    event_list = requests.get(url).json()
    events = event_list.get('data')
    # we will be happy with 100 latest events for now
    cache.add('linkedevents', events, 3600)
    return events
Example #15
Source File: models.py From digihel with MIT License | 5 votes |
def _facebook_events(self):
    if not hasattr(settings, 'FACEBOOK_APP_ID') or not hasattr(settings, 'FACEBOOK_APP_SECRET'):
        return []
    events = cache.get('facebook')
    if events:
        return events
    events = []
    # facebook feed returns events latest first
    url = 'https://{}{}?fields=feed{{link,message,object_id}}&access_token={}|{}'.format(
        self.urls[self.data_source], self.facebook_page_id,
        str(settings.FACEBOOK_APP_ID), settings.FACEBOOK_APP_SECRET)
    feed = requests.get(url).json()['feed']['data']
    # filter the events from the feed
    for item in feed:
        if 'link' in item:
            if 'https://www.facebook.com/events/' in str(item['link']):
                events.append(item)
    # fetch details for the events
    event_ids = ','.join([event['object_id'] for event in events])
    details = []
    url = 'https://{}?ids={}&fields=description,cover,end_time,name,start_time,id,picture,place&access_token={}|{}'.format(
        self.urls[self.data_source], event_ids,
        str(settings.FACEBOOK_APP_ID), settings.FACEBOOK_APP_SECRET)
    details = requests.get(url).json()
    for event in events:
        event['details'] = details[event['object_id']]
    cache.add('facebook', events, 3600)
    return events
Example #16
Source File: tasks.py From astrobin with GNU Affero General Public License v3.0 | 5 votes |
def retrieve_thumbnail(pk, alias, options):
    from astrobin.models import Image

    revision_label = options.get('revision_label', 'final')

    LOCK_EXPIRE = 1
    lock_id = 'retrieve_thumbnail_%d_%s_%s' % (pk, revision_label, alias)

    acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)
    release_lock = lambda: cache.delete(lock_id)

    def set_thumb():
        url = thumb.url
        field = image.get_thumbnail_field(revision_label)
        if not field.name.startswith('images/'):
            field.name = 'images/' + field.name
        cache_key = image.thumbnail_cache_key(field, alias)
        cache.set(cache_key, url, 60 * 60 * 24 * 365)
        logger.debug("Image %d: saved generated thumbnail in the cache." % image.pk)

        thumbnails, created = ThumbnailGroup.objects.get_or_create(
            image=image, revision=revision_label)
        setattr(thumbnails, alias, url)
        thumbnails.save()
        logger.debug("Image %d: saved generated thumbnail in the database." % image.pk)

        cache.delete('%s.retrieve' % cache_key)

    if acquire_lock():
        try:
            image = Image.all_objects.get(pk=pk)
            thumb = image.thumbnail_raw(alias, options)
            if thumb:
                set_thumb()
            else:
                logger.debug("Image %d: unable to generate thumbnail." % image.pk)
        except Exception as e:
            logger.debug("Error retrieving thumbnail: %s" % e.message)
        finally:
            release_lock()
        return

    logger.debug('retrieve_thumbnail task is already running')
Example #17
Source File: mock.py From controller with MIT License | 5 votes |
def _acquire(self):
    return cache.add(self.key, 'true', self.timeout)
Example #18
Source File: test_tasks.py From mitoc-trips with GNU General Public License v3.0 | 5 votes |
def test_lock_always_released(self):
    """Even when raising exceptions, the lock is released."""

    @tasks.mutex_task()
    def divide(numerator, denominator):
        return numerator / denominator

    with self.assertRaises(ZeroDivisionError):
        divide(3, 0)

    self.cache.add.assert_called_with('divide', 'true', 600)
    self.cache.delete.assert_called_with('divide')
Example #19
Source File: test_tasks.py From mitoc-trips with GNU General Public License v3.0 | 5 votes |
def test_lock_format_custom_naming(self):
    """The decorator accepts a string that formats the task ID.

    Specifically, this decorator can access both positional arguments
    and optional arguments.
    """

    @tasks.mutex_task('{positional_arg}-{named_kwarg2}')
    def dummy_task(positional_arg, named_kwarg='123', named_kwarg2=None):
        pass

    dummy_task('hello', named_kwarg2='there')

    self.cache.add.assert_called_with('hello-there', 'true', 600)
    self.cache.delete.assert_called_with('hello-there')
Example #20
Source File: test_tasks.py From mitoc-trips with GNU General Public License v3.0 | 5 votes |
def test_lock_format_default_naming(self):
    """By default, we just use the function name as a unique lock ID."""

    @tasks.mutex_task()
    def some_unique_task_name(positional_arg):
        pass

    some_unique_task_name('a_string_argument')

    self.cache.add.assert_called_with('some_unique_task_name', 'true', 600)
    self.cache.delete.assert_called_with('some_unique_task_name')
Example #21
Source File: lock.py From django-google-adwords with MIT License | 5 votes |
def acquire_googleadwords_lock(model, identifier):
    # cache.add fails if the key already exists
    return cache.add(get_googleadwords_lock_id(model, identifier), "true",
                     settings.GOOGLEADWORDS_LOCK_TIMEOUT)
Example #22
Source File: lock.py From django-google-adwords with MIT License | 5 votes |
def release_googleadwords_lock(model, identifier):
    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    return cache.delete(get_googleadwords_lock_id(model, identifier))
Example #23
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_add_with_expired(self):
    cache.add("mykey", "value", 0.3)
    assert cache.get("mykey") == "value"

    result = cache.add("mykey", "newvalue", 0.3)
    assert not result
    assert cache.get("mykey") == "value"

    time.sleep(0.4)

    result = cache.add("mykey", "newvalue", 60)
    assert result
    assert cache.get("mykey") == "newvalue"
Example #24
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_expiration(self):
    # Cache values can be set to expire
    cache.set("expire1", "very quickly", 0.1)
    cache.set("expire2", "very quickly", 0.1)
    cache.set("expire3", "very quickly", 0.1)

    time.sleep(0.2)
    assert cache.get("expire1") is None

    cache.add("expire2", "newvalue")
    assert cache.get("expire2") == "newvalue"
    assert not cache.has_key("expire3")  # noqa
Example #25
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_add_fail_on_pickleerror(self):
    "See https://code.djangoproject.com/ticket/21200"
    with pytest.raises(pickle.PickleError):
        cache.add("unpickable", Unpickable())
Example #26
Source File: test_cache.py From django-mysql with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_cache_versioning_add(self):
    # add, default version = 1, but manually override version = 2
    cache.add("answer1", 42, version=2)
    assert cache.get("answer1", version=1) is None
    assert cache.get("answer1", version=2) == 42

    cache.add("answer1", 37, version=2)
    assert cache.get("answer1", version=1) is None
    assert cache.get("answer1", version=2) == 42

    cache.add("answer1", 37, version=1)
    assert cache.get("answer1", version=1) == 37
    assert cache.get("answer1", version=2) == 42

    # v2 add, using default version = 2
    caches["v2"].add("answer2", 42)
    assert cache.get("answer2", version=1) is None
    assert cache.get("answer2", version=2) == 42

    caches["v2"].add("answer2", 37)
    assert cache.get("answer2", version=1) is None
    assert cache.get("answer2", version=2) == 42

    caches["v2"].add("answer2", 37, version=1)
    assert cache.get("answer2", version=1) == 37
    assert cache.get("answer2", version=2) == 42

    # v2 add, default version = 2, but manually override version = 1
    caches["v2"].add("answer3", 42, version=1)
    assert cache.get("answer3", version=1) == 42
    assert cache.get("answer3", version=2) is None

    caches["v2"].add("answer3", 37, version=1)
    assert cache.get("answer3", version=1) == 42
    assert cache.get("answer3", version=2) is None

    caches["v2"].add("answer3", 37)
    assert cache.get("answer3", version=1) == 42
    assert cache.get("answer3", version=2) == 37
Example #27
Source File: tasks.py From django-ethereum-events with MIT License | 5 votes |
@contextmanager  # assumed from the yield/try/finally shape (contextlib.contextmanager)
def cache_lock(lock_id, lock_value):
    """Cache based locking mechanism.

    Cache backends `memcached` and `redis` are recommended.
    """
    # cache.add fails if the key already exists
    status = cache.add(lock_id, lock_value)
    try:
        yield status
    finally:
        cache.delete(lock_id)
Example #28
Source File: utils.py From opensurfaces with MIT License | 5 votes |
def single_instance_task(timeout=3600 * 12):
    """Decorator that ensures that a celery task is only run once.
    Default timeout is 12 hours.

    See: http://stackoverflow.com/questions/4095940/running-unique-tasks-with-celery
    See: http://ask.github.com/celery/cookbook/tasks.html#ensuring-a-task-is-only-executed-one-at-a-time

    .. note:: This only works if all tasks share the same django cache.
    """
    def task_exc(func):
        def wrapper(*args, **kwargs):
            lock_id = "single_instance_task:" + func.__name__
            acquire_lock = lambda: cache.add(lock_id, True, timeout)
            release_lock = lambda: cache.delete(lock_id)
            if acquire_lock():
                try:
                    func(*args, **kwargs)
                finally:
                    try:
                        release_lock()
                    except:
                        pass
            else:
                logger.info('Task %s already running' % func.__name__)
        wrapper.__name__ = func.__name__
        return wrapper
    return task_exc
Example #29
Source File: throttle_mail.py From online-judge with GNU Affero General Public License v3.0 | 5 votes |
def new_email():
    cache.add('error_email_throttle', 0, settings.DMOJ_EMAIL_THROTTLING[1])
    return cache.incr('error_email_throttle')
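
A note on this last pattern: add() seeds the counter (with the throttle window as its timeout) only when no counter exists yet, so the following incr() always has a key to increment, and on shared backends such as memcached or redis the count stays consistent across processes.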