Python cachetools Examples

The following are 25 code examples of the cachetools module, drawn from open-source projects. You can go to the original project or source file by following the links above each example.
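All of the snippets below center on the module's cache classes, most often cachetools.TTLCache. As a quick orientation, here is a minimal sketch of the pattern they share; it is not taken from any of the projects below, and the sizes, TTL, and keys are placeholders:

import cachetools

# A TTLCache is a bounded, dict-like mapping whose entries expire.
# maxsize caps the number of entries; ttl is each entry's lifetime in seconds.
cache = cachetools.TTLCache(maxsize=100, ttl=60)

cache['answer'] = 42
print(cache.get('answer'))             # 42 while the entry is fresh
# Once 60 seconds elapse (or the entry is evicted), the key is simply absent:
print(cache.get('answer', 'expired'))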
Example #1
Source File: UrlUpserter.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def __init__(self, msg_queue, db_interface):
		self.response_queue = msg_queue
		self.log = logging.getLogger("Main.LinkAggregator")

		try:
			signal.signal(signal.SIGINT, signal.SIG_IGN)
		except ValueError:
			self.log.warning("Cannot configure job fetcher task to ignore SIGINT. May be an issue.")

		# LRU Cache with a maxsize of 1 million, and a TTL of 6 hours
		self.seen = cachetools.TTLCache(maxsize=1000 * 1000, ttl=60 * 60 * 6)

		self.queue_items = 0
		self.link_count = 0
		self.amqpUpdateCount = 0
		self.deathCounter = 0

		self.batched_links = []
		self.pending_upserts = []

		self.db_int = db_interface
		self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=6)

		self.check_init_func() 
Example #2
Source File: __init__.py    From hummingbot with Apache License 2.0
def async_ttl_cache(ttl: int = 3600, maxsize: int = 1):
    cache = cachetools.TTLCache(ttl=ttl, maxsize=maxsize)

    def decorator(fn):
        @functools.wraps(fn)
        async def memoize(*args, **kwargs):
            key = str((args, kwargs))
            try:
                cache[key] = cache.pop(key)
            except KeyError:
                cache[key] = await fn(*args, **kwargs)
            return cache[key]
        return memoize

    return decorator 
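For context, here is a sketch of how this decorator might be applied; the coroutine below is hypothetical and stands in for any slow awaitable call:

import asyncio

@async_ttl_cache(ttl=60, maxsize=32)
async def fetch_price(symbol):
    await asyncio.sleep(1)  # stand-in for a slow network request
    return {'symbol': symbol, 'price': 100.0}

# Repeated calls with the same arguments inside the 60-second TTL return
# the cached dict without re-awaiting the slow call:
# asyncio.run(fetch_price('ZRX-ETH'))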
Example #3
Source File: rss_func_db.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def get_feed_article_meta(feedid):
	global QIDIAN_META_CACHE
	if feedid in QIDIAN_META_CACHE:
		return QIDIAN_META_CACHE[feedid]

	sess = get_db_session(flask_sess_if_possible=False)
	have = sess.query(QidianFeedPostMeta).filter(QidianFeedPostMeta.contentid == feedid).scalar()
	if have:
		ret = have.meta
	else:
		ret = {}

	sess.commit()

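	# Inserting into the shared TTLCache can raise KeyError (cachetools
	# caches are not thread-safe); recreate the cache if that happens.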
	try:
		QIDIAN_META_CACHE[feedid] = ret
	except KeyError:
		QIDIAN_META_CACHE = cachetools.TTLCache(maxsize=5000, ttl=60 * 5)
		QIDIAN_META_CACHE[feedid] = ret


	return ret 
Example #4
Source File: core.py    From brawlstats with MIT License
def __init__(self, token, session=None, timeout=30, is_async=False, **options):
        # Async options
        self.is_async = is_async
        self.loop = options.get('loop', asyncio.get_event_loop()) if self.is_async else None
        self.connector = options.get('connector')

        # Session and request options
        self.session = options.get('session') or (
            aiohttp.ClientSession(loop=self.loop, connector=self.connector) if self.is_async else requests.Session()
        )
        self.timeout = timeout
        self.prevent_ratelimit = options.get('prevent_ratelimit', False)
        self.api = API(options.get('base_url'), version=1)

        self.debug = options.get('debug', False)
        self.cache = TTLCache(3200 * 3, 60 * 3)  # rate limit is 3200 requests per minute; cache holds 3 minutes' worth

        # Request/response headers
        self.headers = {
            'Authorization': 'Bearer {}'.format(token),
            'User-Agent': 'brawlstats/{0} (Python {1[0]}.{1[1]})'.format(self.api.VERSION, sys.version_info),
            'Accept-Encoding': 'gzip'
        } 
Example #5
Source File: caches.py    From endpoints-management-python with Apache License 2.0
def __init__(self, maxsize, ttl, out_deque=None, **kw):
        """Constructor.

        Args:
          maxsize (int): the maximum number of entries in the queue
          ttl (int): the ttl for entries added to the cache
          out_deque (:class:`collections.deque`): a `deque` in which to add items
            that expire from the cache
          **kw: the other keyword args supported by the constructor to
            :class:`cachetools.TTLCache`

        Raises:
          ValueError: if out_deque is not a collections.deque

        """
        super(DequeOutTTLCache, self).__init__(maxsize, ttl, **kw)
        if out_deque is None:
            out_deque = collections.deque()
        elif not isinstance(out_deque, collections.deque):
            raise ValueError(u'out_deque should be a collections.deque')
        self._out_deque = out_deque
        self._tracking = {} 
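The rest of the class is not shown here, but the docstring's contract is that entries expiring out of the cache are collected in out_deque. Under that assumption, usage might look like this sketch:

import collections

expired = collections.deque()
cache = DequeOutTTLCache(maxsize=100, ttl=10, out_deque=expired)
cache['task-1'] = 'pending'
# ...once the TTL elapses and the cache expires the entry, it should
# appear in `expired`, where a consumer can drain it for post-processing.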
Example #6
Source File: blockchain.py    From Zilliqa-Mining-Proxy with GNU General Public License v3.0
def init(cls, conf):
        cls.config = conf
        cls.zil_conf = conf["zilliqa"]
        cls.api = zilliqa_api.API(cls.zil_conf["api_endpoint"])
        cls.cache = TTLCache(maxsize=64, ttl=cls.zil_conf["update_interval"]) 
Example #7
Source File: helper.py    From resilient-community-apps with MIT License
def __init__(self, tenant_id, client_id, client_secret):
        self.__cache = TTLCache(maxsize=1, ttl=55*60)  # Set to expire after 55 minutes so it is always fresh

        self.__tenant_id = tenant_id
        self.__client_id = client_id
        self.__client_secret = client_secret
        self.__get_cache('microsoft_security_graph_access_token') 
Example #8
Source File: fvcom.py    From Ocean-Data-Map-Project with GNU General Public License v3.0
def __init__(self, nc_data: Union[CalculatedData, NetCDFData]) -> None:
        super().__init__(nc_data)
        self.nc_data = nc_data
        self.variables = nc_data.variables
        self._kdt: list = [None, None]  # later populated with KDTree instances
        self.__timestamp_cache: TTLCache = TTLCache(1, 3600) 
Example #9
Source File: netcdf_data.py    From Ocean-Data-Map-Project with GNU General Public License v3.0
def __init__(self, url: str, **kwargs: Dict) -> None:
        super().__init__(url)
        self.meta_only: bool = kwargs.get('meta_only', False)
        self.dataset: Union[xarray.Dataset, netCDF4.Dataset] = None
        self._variable_list: VariableList = None
        self.__timestamp_cache: TTLCache = TTLCache(1, 3600)
        self._nc_files: list = []
        self._grid_angle_file_url: str = kwargs.get('grid_angle_file_url', "")
        self._time_variable: xarray.IndexVariable = None
        self._dataset_open: bool = False
        self._dataset_key: str = kwargs.get('dataset_key', "")
        self._dataset_config: DatasetConfig = (
            DatasetConfig(self._dataset_key) if self._dataset_key else None
        ) 
Example #10
Source File: test_sqlite_remote.py    From terracotta with MIT License
def test_remote_database_cache(s3_db_factory, raster_file, monkeypatch):
    keys = ('some', 'keys')
    dbpath = s3_db_factory(keys)

    from terracotta import get_driver

    driver = get_driver(dbpath)
    with monkeypatch.context() as m:
        # replace TTL cache timer by manual timer
        m.setattr(driver, '_checkdb_cache', TTLCache(maxsize=1, ttl=1, timer=Timer()))
        assert len(driver._checkdb_cache) == 0

        with driver.connect():
            assert driver.key_names == keys
            assert driver.get_datasets() == {}
            modification_date = os.path.getmtime(driver.path)

            s3_db_factory(keys, datasets={('some', 'value'): str(raster_file)})

            # no change yet
            assert driver.get_datasets() == {}
            assert os.path.getmtime(driver.path) == modification_date

        # check if remote db is cached after one tick
        driver._checkdb_cache.timer.tick()
        assert len(driver._checkdb_cache) == 1

        with driver.connect():  # db connection is cached; so still no change
            assert driver.get_datasets() == {}
            assert os.path.getmtime(driver.path) == modification_date

        # TTL cache is invalidated after second tick
        driver._checkdb_cache.timer.tick()
        assert len(driver._checkdb_cache) == 0

        with driver.connect():  # now db is updated on reconnect
            assert list(driver.get_datasets().keys()) == [('some', 'value')]
            assert os.path.getmtime(driver.path) != modification_date 
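The Timer class used above is defined elsewhere in the test suite; a minimal sketch of such a manually-advanced clock (an assumption, not terracotta's exact code) would be:

class Timer:
    """A fake clock for TTLCache: time advances only when tick() is called."""

    def __init__(self):
        self.time = 0

    def tick(self):
        self.time += 1

    def __call__(self):
        # TTLCache consults this callable to decide expiry, so entries
        # expire once tick() has advanced the clock past their TTL.
        return self.time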
Example #11
Source File: sqlite_remote.py    From terracotta with MIT License
def __init__(self, remote_path: str) -> None:
        """Initialize the RemoteSQLiteDriver.

        This should not be called directly, use :func:`~terracotta.get_driver` instead.

        Arguments:

            remote_path: S3 URL in the form ``s3://bucket/key`` to remote SQLite database
                (has to exist).

        """
        settings = get_settings()

        self.__rm = os.remove  # keep reference to use in __del__

        os.makedirs(settings.REMOTE_DB_CACHE_DIR, exist_ok=True)
        local_db_file = tempfile.NamedTemporaryFile(
            dir=settings.REMOTE_DB_CACHE_DIR,
            prefix='tc_s3_db_',
            suffix='.sqlite',
            delete=False
        )
        local_db_file.close()

        self._remote_path: str = str(remote_path)
        self._checkdb_cache = TTLCache(maxsize=1, ttl=settings.REMOTE_DB_CACHE_TTL)

        super().__init__(local_db_file.name) 
Example #12
Source File: metadata_cache.py    From biggraphite with Apache License 2.0
def open(self):
        """Allocate ressources used by the cache."""
        super(MemoryCache, self).open()

        def _timer():
            # Use a custom, jittered timer to spread out expirations. Within a
            # single instance it changes nothing, but it helps when several
            # instances are running, since their entries won't all expire at once.
            return time.time() + self.__ttl * random.uniform(-0.25, 0.25)

        self.__cache = cachetools.TTLCache(
            maxsize=self.__size, ttl=self.__ttl, timer=_timer
        ) 
Example #13
Source File: accessor_cache.py    From biggraphite with Apache License 2.0
def __init__(self, size, ttl):
        """Initialize the memory cache."""
        super(MemoryCache, self).__init__()
        self.__size = size
        self.__ttl = ttl
        self.__cache = cachetools.TTLCache(maxsize=self.__size, ttl=self.__ttl) 
Example #14
Source File: notification.py    From kubernetes-ec2-autoscaler with MIT License
def __init__(self, hook=None, bot_token=None):
        self.hook = hook
        self.bot_token = bot_token

        self.cache = TTLCache(maxsize=128, ttl=60*30) 
Example #15
Source File: assets.py    From gs-quant with Apache License 2.0
def _cached(fn):
    _fn_cache_lock = threading.Lock()
    # short-term cache to avoid retrieving the same data several times in succession
    cache = cachetools.TTLCache(1024, 30) if os.environ.get('GSQ_SEC_MASTER_CACHE') else None

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if cache is not None:
            args = [tuple(x) if isinstance(x, list) else x for x in args]  # tuples are hashable
            k = cachetools.keys.hashkey(GsSession.current, *args, **kwargs)
            with metalock:
                invocation_lock = invocation_locks.setdefault(f'{fn.__name__}:{k}', threading.Lock())
            with invocation_lock:
                with _fn_cache_lock:
                    result = cache.get(k)
                if result is not None:
                    _logger.debug('%s cache hit: %s, %s', fn.__name__, str(args), str(kwargs))
                    return result
                result = fn(*args, **kwargs)
                with _fn_cache_lock:
                    cache[k] = result
        else:
            result = fn(*args, **kwargs)
        return result

    return wrapper 
Example #16
Source File: default.py    From python with Apache License 2.0
def __init__(self, **cache_options):
        self.cache = cachetools.TTLCache(**cache_options) 
Example #17
Source File: caching.py    From apex-sigma-core with GNU General Public License v3.0
def __init__(self, cfg):
        """
        :type cfg: sigma.core.mechanics.config.CacheConfig
        :param cfg: The CacheConfig object for getting the configuration parameters.
        """
        super().__init__(cfg)
        self.cache = cachetools.TTLCache(self.cfg.size, self.cfg.time) 
Example #18
Source File: paginator.py    From apex-sigma-core with GNU General Public License v3.0
def __init__(self):
        self.paginators = cachetools.TTLCache(500, 300) 
Example #19
Source File: threat_webservice.py    From resilient-python-api with MIT License
def __init__(self, opts):
        super(CustomThreatService, self).__init__(**_make_args(opts))

        # Configurable options
        self.options = opts.get(CONFIG_SECTION, {})

        # Do we support "file-content" artifacts?  Default is no.
        # TODO add implementation support to parse the file content
        self.support_upload_file = bool(self.options.get(CONFIG_UPLOAD_FILE.key, CONFIG_UPLOAD_FILE.default))

        # Default time that this service will tell Resilient to retry
        self.first_retry_secs = int(self.options.get(CONFIG_FIRST_RETRY_SECS.key, CONFIG_FIRST_RETRY_SECS.default)) or 5
        self.later_retry_secs = int(self.options.get(CONFIG_LATER_RETRY_SECS.key, CONFIG_LATER_RETRY_SECS.default)) or 60

        # Size of the request cache
        self.cache_size = int(self.options.get(CONFIG_CACHE_SIZE.key, CONFIG_CACHE_SIZE.default))
        # TTL of the request cache (seconds before we give up on a request lookup)
        self.cache_ttl = int(self.options.get(CONFIG_CACHE_TTL.key, CONFIG_CACHE_TTL.default))

        # Limit to the number of queries we'll answer for unfinished searchers (count before giving up on them)
        self.max_retries = int(self.options.get(CONFIG_MAX_RETRIES.key, CONFIG_MAX_RETRIES.default))

        # IDs and their results are maintained in a cache so that we can set
        # an upper bound on the number of in-progress and recent lookups.
        self.cache = TTLCache(maxsize=self.cache_size, ttl=self.cache_ttl)

        # Helper component does event dispatch work
        self.async_helper = CustomThreatServiceHelper(self)
        (self.helper_thread, self.bridge) = self.async_helper.start()

        urls = ["{0}/{1}".format(self.channel, e) for e in self.events()]
        LOG.info("Web handler for %s", ", ".join(urls))

Example #20
Source File: hash_request_cache.py    From cloudify-manager with Apache License 2.0
def __init__(self):
        ttl = 60 * 5
        max_size = 500
        self._cache = TTLCache(max_size, ttl) 
Example #21
Source File: MessageProcessor.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def __init_pool(self, pool_name):
		self.worker_pools[pool_name] = {
			'outgoing_q'   : queue.Queue(),
			'incoming_q'   : queue.Queue(),
			'workers'      : [],

			'dispatch_map' : cachetools.TTLCache(maxsize=10000, ttl=hours(24)),

			# The chunk structure is slightly annoying, so just limit to 50 partial message keys, and
			# a TTL of 3 hours.
			# 'chunk_cache'  : fdict.sfdict(filename=os.path.join(settings_file.CHUNK_CACHE_DIR, "chunk_cache_{}.db".format(pool_name.lower()))),
			'chunk_cache'   : cachetools.TTLCache(maxsize=50, ttl=hours(3)),
			'chunk_lock'   : threading.Lock(),
		} 
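The hours() helper comes from elsewhere in the project and is not shown; given the surrounding comments (a 24-hour dispatch map, a 3-hour chunk cache), it presumably just converts hours to seconds:

def hours(n):
    return n * 60 * 60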
Example #22
Source File: caches.py    From endpoints-management-python with Apache License 2.0
def create(options, timer=None, use_deque=True):
    """Create a cache specified by ``options``

    ``options`` is an instance of either
    :class:`endpoints_management.control.caches.CheckOptions` or
    :class:`endpoints_management.control.caches.ReportOptions`

    The returned cache is wrapped in a :class:`LockedObject`, requiring it to
    be accessed in a with statement that gives synchronized access

    Example:
      >>> options = CheckOptions()
      >>> synced_cache = create(options)
      >>> with synced_cache as cache:  #  acquire the lock
      ...    cache['a_key'] = 'a_value'

    Args:
      options (object): an instance of either of the options classes

    Returns:
      :class:`cachetools.Cache`: the cache implementation specified by options
        or None: if options is ``None`` or if options.num_entries <= 0

    Raises:
       ValueError: if options is not a supported type

    """
    if options is None:  # no options, don't create cache
        return None

    if not isinstance(options, (CheckOptions, QuotaOptions, ReportOptions)):
        _logger.error(u'make_cache(): bad options %s', options)
        raise ValueError(u'Invalid options')

    if (options.num_entries <= 0):
        _logger.debug(u"did not create cache, options was %s", options)
        return None

    _logger.debug(u"creating a cache from %s", options)
    if (options.flush_interval > ZERO_INTERVAL):
        # options always has a flush_interval, but may have an expiration
        # field. If the expiration is present, use that instead of the
        # flush_interval for the ttl
        ttl = getattr(options, u'expiration', options.flush_interval)
        cache_cls = DequeOutTTLCache if use_deque else cachetools.TTLCache
        return LockedObject(
            cache_cls(
                options.num_entries,
                ttl=ttl.total_seconds(),
                timer=to_cache_timer(timer)
            ))

    cache_cls = DequeOutLRUCache if use_deque else cachetools.LRUCache
    return LockedObject(cache_cls(options.num_entries)) 
Example #23
Source File: misc_db.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def set_in_db_key_value_store(key, new_data):
	global KV_META_CACHE

	new_s = str(new_data)
	if len(new_s) > 40:
		new_s = new_s[:35] + "..."

	kv_log.info("Setting kv key '%s' to '%s'", key, new_s)
	if key in KV_META_CACHE:
		if KV_META_CACHE[key] == new_data:
			return

	thread_id = "kv_store_{}".format(threading.get_ident())

	with session_context(thread_id) as sess:
		have = sess.query(KeyValueStore).filter(KeyValueStore.key == key).scalar()
		if have:
			if have.value != new_data:
				kv_log.info("Updating item: '%s', '%s'", have, have.key)
				kv_log.info("	old -> %s", have.value)
				kv_log.info("	new -> %s", new_s)
				have.value = new_data
			else:
				kv_log.info("Item has not changed. Nothing to do!")
		else:
			kv_log.info("New item: '%s', %s", key, new_s)
			new = KeyValueStore(
				key   = key,
				value = new_data,
				)
			sess.add(new)

		sess.commit()

	try:
		KV_META_CACHE[key] = copy.copy(new_data)
	except KeyError:
		KV_META_CACHE = cachetools.TTLCache(maxsize=5000, ttl=60 * 5)
		KV_META_CACHE[key] = copy.copy(new_data)


Example #24
Source File: rss_func_db.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def set_feed_article_meta(feedid, new_data):
	global QIDIAN_META_CACHE
	# if feedid in QIDIAN_META_CACHE:
	# 	if QIDIAN_META_CACHE[feedid] == new_data:
	# 		return

	sess = get_db_session(flask_sess_if_possible=False)
	have = sess.query(QidianFeedPostMeta).filter(QidianFeedPostMeta.contentid == feedid).scalar()
	if have:
		if have.meta != new_data:
			print("Updating item: ", have, have.contentid)
			print("	old -> ", have.meta)
			print("	new -> ", new_data)
			have.meta = new_data
		else:
			print("Item has not changed. Nothing to do!")
	else:
		print("New item: ", feedid, new_data)
		new = QidianFeedPostMeta(
			contentid = feedid,
			meta      = new_data,
			)
		sess.add(new)

	sess.commit()

	try:
		QIDIAN_META_CACHE[feedid] = new_data
	except KeyError:
		QIDIAN_META_CACHE = cachetools.TTLCache(maxsize=5000, ttl=60 * 5)
		QIDIAN_META_CACHE[feedid] = new_data

	return


Example #25
Source File: pipe.py    From d6tpipe with MIT License
def __init__(self, api, name, mode='default', sortby='filename', credentials=None):

        # set params
        super().__init__(name, sortby)
        self.api = api
        self.api_islocal = api.__class__.__name__ == 'APILocal'
        if mode not in _cfg_mode_valid:
            raise ValueError('Invalid mode, needs to be {}'.format(_cfg_mode_valid))
        self.mode = mode

        # get remote details
        self.cnxnapi = api.cnxn
        self.cnxnpipe = self.cnxnapi.pipes._(name)
        self.settings = self.cnxnpipe.get()[1]
        if not self.settings:
            raise ValueError('pipe not found, make sure it was created')
        if self.settings['protocol'] not in ['s3','ftp', 'sftp']:
            raise NotImplementedError('Unsupported protocol, only s3 and (s)ftp supported')
        self.settings['options'] = self.settings.get('options',{})
        self.remote_prefix = self.settings['options']['remotepath']
        self.encrypted_pipe = self.settings['options'].get('encrypted',False)
        if self.encrypted_pipe:
            self.settings = self.api.decode(self.settings)
        self.role = self.settings.get('role')
        self.cfg_profile = api.cfg_profile
        self._set_dir(self.name)
        self.credentials_override = credentials

        # DDL
        self.schema = self.settings.get('schema',{})

        # create db connection
        self._db = TinyDB(self.cfg_profile['filedb'], storage=_tdbserialization)
        self.dbfiles = self._db.table(name+'-files')
        self.dbconfig = self._db.table(name+'-cfg')

        self._cache_scan = cachetools.TTLCache(maxsize=1, ttl=5*60)

        # connect msg
        msg = 'Successfully connected to pipe {}. '.format(self.name)
        if self.role == 'read':
            msg += ' Read only access'
        print(msg)
        self.dbconfig.upsert({'name': self.name, 'pipe': self.settings}, Query().name == self.name)