Python redis.call() Examples

The following are 19 code examples of redis.call(), drawn from open source projects. The source file, project, and license are noted above each example.
Example #1
Source File: state.py    From resolwe with Apache License 2.0
def __init__(self, *args, **kwargs):
            """Initialize atomic integer instance.

            :param initial_value: Optional. The initial value of this
                variable in case it doesn't exist in Redis yet.
            """
            super().__init__(*args, **kwargs)
            self._lua_add = ManagerState.LuaFunction(self, "oldval + arg", short=True)
            self._lua_mul = ManagerState.LuaFunction(self, "oldval * arg", short=True)
            self._lua_floordiv = ManagerState.LuaFunction(
                self, "floor(oldval / arg)", short=True
            )
            self._lua_cas = ManagerState.LuaFunction(
                self,
                """
                local oldval = tonumber(redis.call('EXISTS', KEYS[1]) and redis.call('GET', KEYS[1]) or ARGV[3])
                if oldval == tonumber(ARGV[1]) then
                    redis.call('SET', KEYS[1], tonumber(ARGV[2]))
                    return 1
                else
                    return 0
                end
            """,
            )
            self.initial_value = kwargs.get("initial_value", 0) 
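The compare-and-swap script above is self-contained and can be exercised directly with redis-py. A minimal hedged sketch, assuming a local Redis instance (the key name and values are illustrative):

import redis

r = redis.Redis()

cas_lua = """
local oldval = tonumber(redis.call('EXISTS', KEYS[1]) and redis.call('GET', KEYS[1]) or ARGV[3])
if oldval == tonumber(ARGV[1]) then
    redis.call('SET', KEYS[1], tonumber(ARGV[2]))
    return 1
else
    return 0
end
"""

cas = r.register_script(cas_lua)
# ARGV[1] = expected value, ARGV[2] = new value, ARGV[3] = default if the key is missing.
assert cas(keys=["demo:counter"], args=[0, 42, 0]) == 1  # swap succeeds
assert cas(keys=["demo:counter"], args=[0, 99, 0]) == 0  # stale expectation, swap refused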
Example #2
Source File: queue.py    From daf-recipes with GNU General Public License v3.0
def gather_stage(harvester, job):
    '''Calls the harvester's gather_stage, returning harvest object ids, with
    some error handling.

    This is split off from gather_callback so that tests can call it without
    dealing with queue stuff.
    '''
    job.gather_started = datetime.datetime.utcnow()

    try:
        harvest_object_ids = harvester.gather_stage(job)
    except (Exception, KeyboardInterrupt):
        harvest_objects = model.Session.query(HarvestObject).filter_by(
            harvest_job_id=job.id
        )
        for harvest_object in harvest_objects:
            model.Session.delete(harvest_object)
        model.Session.commit()
        raise
    finally:
        job.gather_finished = datetime.datetime.utcnow()
        job.save()
    return harvest_object_ids 
Example #3
Source File: test_redis_scripts.py    From tasktiger with MIT License
def test_execute_pipeline_script(self, can_replicate_commands):
        if not self.scripts.can_replicate_commands:
            assert False, 'test suite needs Redis 3.2 or higher'

        self.scripts._can_replicate_commands = can_replicate_commands

        self.conn.script_flush()

        s = self.conn.register_script("redis.call('set', 'x', 'y')")

        # Uncached execution
        p = self.conn.pipeline()
        s(client=p)
        self.scripts.execute_pipeline(p)
        assert self.conn.get('x') == 'y'

        self.conn.delete('x')

        # Cached execution
        p = self.conn.pipeline()
        s(client=p)
        self.scripts.execute_pipeline(p)
        assert self.conn.get('x') == 'y' 
Example #4
Source File: qbcli_redis.py    From stolos with Apache License 2.0
def consume(self):
        """Consume value gotten from queue.
        Raise UserWarning if consume() called before get()
        """
        if self._item is None:
            raise UserWarning("Must call get() before consume()")

        self.LOCKS.pop(self._h_k)

        rv = raw_client().evalsha(
            self._SHAS['lq_consume'],
            len(self.SCRIPTS['lq_consume']['keys']),
            self._h_k, self._path, self._q_lookup, self._client_id)
        assert rv == 1

        self._h_k = None
        self._item = None 
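For reference, a minimal hedged sketch of the script_load/evalsha pattern used above, with a trivial script; the stolos SCRIPTS/SHAS registry is project-specific, so everything below is illustrative:

import redis

r = redis.Redis()
sha = r.script_load("redis.call('HDEL', KEYS[1], ARGV[1]) return 1")
# evalsha takes the script's SHA1, the number of keys, then the keys and args.
rv = r.evalsha(sha, 1, "demo:lock:hashes", "item-1")
assert rv == 1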
Example #5
Source File: cloud_cache.py    From autopi-core with Apache License 2.0
def _upload_batch_continuing(self, queue):
        ret = {
            "count": 0
        }

        res = self._upload_batch(queue)  # Remember this call will raise exception upon server error

        ret["count"] = res["count"]
        if "error" in res:
            ret["error"] = res["error"]

        # Continue to upload if more pending batches present
        while not "error" in res and (res["count"] == self.options.get("max_batch_size", 100) or res.get("continue", False)):
            res = self._upload_batch(queue)  # Remember this call will raise exception upon server error

            ret["count"] += res["count"]
            if "error" in res:
                ret["error"] = res["error"]

        return ret 
Example #6
Source File: cloud_cache.py    From autopi-core with Apache License 2.0
def upload_pending(self):
        ret = {
            "total": 0,
        }

        try:
            res = self._upload_batch_continuing(self.PENDING_QUEUE)  # Remember this call will raise exception upon server error
            ret["total"] += res["count"]

            if "error" in res:
                ret.setdefault("errors", []).append(res["error"])

        except RequestException as rex:
            ret.setdefault("errors", []).append(str(rex))

            # Retry queue logic is moved to '_upload_batch' method

        return ret 
Example #7
Source File: lookuplib.py    From pyhamtools with MIT License
def _lookup_clublogAPI(self, callsign=None, timestamp=None, url="https://secure.clublog.org/dxcc", apikey=None):
        """ Set up the Lookup object for Clublog Online API
        """

        params = {"year" : timestamp.strftime("%Y"),
            "month" : timestamp.strftime("%m"),
            "day" : timestamp.strftime("%d"),
            "hour" : timestamp.strftime("%H"),
            "minute" : timestamp.strftime("%M"),
            "api" : apikey,
            "full" : "1",
            "call" : callsign
        }

        if timestamp is None:
            timestamp = datetime.utcnow().replace(tzinfo=UTC)

        if sys.version_info.major == 3:
            encodeurl = url + "?" + urllib.parse.urlencode(params)
        else:
            encodeurl = url + "?" + urllib.urlencode(params)
        response = requests.get(encodeurl, timeout=5)

        if not self._check_html_response(response):
            raise LookupError

        jsonLookup = response.json()
        lookup = {}

        for item in jsonLookup:
            if item == "Name": lookup[const.COUNTRY] = jsonLookup["Name"]
            elif item == "DXCC": lookup[const.ADIF] = int(jsonLookup["DXCC"])
            elif item == "Lon": lookup[const.LONGITUDE] = float(jsonLookup["Lon"])*(-1)
            elif item == "Lat": lookup[const.LATITUDE] = float(jsonLookup["Lat"])
            elif item == "CQZ": lookup[const.CQZ] = int(jsonLookup["CQZ"])
            elif item == "Continent": lookup[const.CONTINENT] = jsonLookup["Continent"]

        if lookup[const.ADIF] == 0:
            raise KeyError
        else:
            return lookup 
Example #8
Source File: state.py    From resolwe with Apache License 2.0
def __init__(self, owner, script, short=False):
            """Construct a Lua function instance and register it.

            :param owner: The owning :class:`RedisAtomicBase` instance.
            :param script: The Lua script text to register.
            :param short: If ``True``, the script as given is just an
                expression that still needs to be wrapped into a full
                script.
            """
            if short:
                self.script = (
                    """
                    local oldval = tonumber(redis.call('EXISTS', KEYS[1]) and redis.call('GET', KEYS[1]) or ARGV[2])
                    local arg = tonumber(ARGV[1])
                    local newval = """
                    + script
                    + """
                    redis.call('SET', KEYS[1], newval)
                    return newval
                """
                )
            else:
                self.script = script

            self.owner = owner
            self.function = owner.redis.register_script(self.script) 
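To make the "short" wrapping concrete, here is the full script this constructor would generate for the "oldval + arg" expression from Example #1, registered and invoked directly with redis-py (a hedged sketch; the key name is illustrative):

import redis

r = redis.Redis()

add_lua = """
local oldval = tonumber(redis.call('EXISTS', KEYS[1]) and redis.call('GET', KEYS[1]) or ARGV[2])
local arg = tonumber(ARGV[1])
local newval = oldval + arg
redis.call('SET', KEYS[1], newval)
return newval
"""

add = r.register_script(add_lua)
# ARGV[1] is the operand; ARGV[2] is the default used when the key does not exist yet.
print(add(keys=["demo:atomic"], args=[5, 0]))  # 5 on the first call, 10 on the next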
Example #9
Source File: tokenList.py    From pep.py with GNU Affero General Public License v3.0
def deleteBanchoSessions(self):
		"""
		Remove all `peppy:sessions:*` redis keys.
		Call at bancho startup to delete old cached sessions

		:return:
		"""
		try:
			# TODO: Make function or some redis meme
			glob.redis.eval("return redis.call('del', unpack(redis.call('keys', ARGV[1])))", 0, "peppy:sessions:*")
		except redis.RedisError:
			pass 
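Note that redis.call('del', unpack(...)) raises an error when no keys match the pattern, because unpacking an empty table leaves del with no arguments; that is why the call above is wrapped in a try/except. A hedged sketch that guards against the empty case instead (the pattern is illustrative):

import redis

r = redis.Redis()

delete_by_pattern = r.register_script("""
local keys = redis.call('keys', ARGV[1])
if #keys > 0 then
    return redis.call('del', unpack(keys))
end
return 0
""")

print(delete_by_pattern(args=["peppy:sessions:*"]))  # number of keys deleted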
Example #10
Source File: base.py    From pysoa with Apache License 2.0
def __init__(self, ring_size):  # type: (int) -> None
        self.metrics_counter_getter = None  # type: Optional[Callable[[six.text_type], Counter]]

        # These should not be overridden by subclasses. The standard or Sentinel base class determines the ring size
        # and passes it in, and then we create a randomized cycle-iterator (which can be infinitely next-ed) of
        # connection indexes to use for choosing a connection when posting to request queues (response queues use a
        # consistent hashing algorithm).
        self._ring_size = ring_size
        self._connection_index_generator = itertools.cycle(random.sample(range(self._ring_size), k=self._ring_size))

        # It doesn't matter which connection we use for this. The underlying socket connection isn't even used (or
        # established, for that matter). But constructing a Script with the `redis` library requires passing it a
        # "default" connection that will be used if we ever call that script without a connection (we won't).
        self.send_message_to_queue = SendMessageToQueueCommand(self._get_connection(0)) 
Example #11
Source File: redis_session.py    From termite-visualizations with BSD 3-Clause "New" or "Revised" License
def __call__(self, record_id):
        # Support DAL shortcut query: table(record_id)

        q = self.id  # This will call the __getattr__ below
                     # returning a MockQuery

        # Instructs MockQuery, to behave as db(table.id == record_id)
        q.op = 'eq'
        q.value = record_id

        row = q.select()
        return row[0] if row else Storage() 
Example #12
Source File: ch11_listing_source.py    From riacn-code with MIT License
def script_load(script):
    # Store the cached SHA1 checksum returned by SCRIPT LOAD in a list,
    # so that it can be modified later from inside the call() function.
    sha = [None]
    # When calling the loaded script, the caller must pass in the Redis
    # connection, the keys the script operates on, and any other arguments.
    def call(conn, keys=[], args=[], force_eval=False):
        if not force_eval:
            # Only try to load the script if its SHA1 checksum is not cached yet.
            if not sha[0]:
                # The SHA1 checksum is not cached, so load the given script.
                sha[0] = conn.execute_command(
                    "SCRIPT", "LOAD", script, parse="LOAD")

            try:
                # Execute the command using the cached SHA1 checksum.
                return conn.execute_command(
                    "EVALSHA", sha[0], len(keys), *(keys+args))

            except redis.exceptions.ResponseError as msg:
                # Re-raise the exception if the error is unrelated to a missing script.
                if not msg.args[0].startswith("NOSCRIPT"):
                    raise

        # On a missing-script error, or when the script must be force-executed,
        # run the given script directly with the EVAL command. After executing
        # the script, EVAL caches it automatically, and the resulting SHA1
        # checksum is exactly the same as the one produced by EVALSHA caching.
        return conn.execute_command(
            "EVAL", script, len(keys), *(keys+args))

    # Return a function that automatically loads and executes the script when called.
    return call
# <end id="script-load"/> 
Example #13
Source File: cloud_cache.py    From autopi-core with Apache License 2.0
def upload_pending(self):
        ret = {
            "total": 0,
        }

        try:
            res = self._upload_batch_continuing(self.PENDING_QUEUE)  # Remember this call will raise exception upon server error
            ret["total"] += res["count"]

            if "error" in res:
                ret.setdefault("errors", []).append(res["error"])

        # Only retry upon server error
        except RequestException as rex:
            ret.setdefault("errors", []).append(str(rex))

            work_queue = self.WORK_QUEUE.format(self.PENDING_QUEUE)
            if self.options.get("max_retry", 10) > 0:

                # Create retry queue for batch
                retry_queue = self.RETRY_QUEUE.format(datetime.datetime.utcnow(), 0)
                log.warning("Failed to upload pending batch - transferring to new dedicated retry queue '{:}': {:}".format(retry_queue, rex))

                self.client.pipeline() \
                    .renamenx(work_queue, retry_queue) \
                    .bgsave() \
                    .execute()

            else:
                log.warning("Failed to upload pending batch - leaving batch in queue '{:}': {:}".format(work_queue, rex))

        return ret 
Example #14
Source File: cloud_cache.py    From autopi-core with Apache License 2.0
def upload_failing(self):
        ret = {
            "total": 0,
        }

        queues = self.list_queues(pattern="fail_*")  # This will also include work queues if present
        if queues:
            log.warning("Found {:} fail queue(s)".format(len(queues)))

        try:
            for queue in queues:
                res = self._upload_batch_continuing(queue)  # Remember this call will raise exception upon server error
                ret["total"] += res["count"]

                # Stop upon first error
                if "error" in res:
                    ret.setdefault("errors", []).append(res["error"])

                    break

        except RequestException as rex:
            ret.setdefault("errors", []).append(str(rex))

            log.warning("Still unable to upload failed batch(es): {:}".format(rex))

        return ret 
Example #15
Source File: cloud_cache.py    From autopi-core with Apache License 2.0
def _upload_batch(self, queue):
        ret = {
            "count": 0
        }

        source_queue = re.sub(self.WORK_QUEUE_REGEX, "", queue)  # Remove suffix if already a work queue
        work_queue = self.WORK_QUEUE.format(source_queue)

        # Pop next batch into work queue
        batch = self._dequeue_batch(source_queue, work_queue, self.options.get("max_batch_size", 100))
        if not batch:
            if log.isEnabledFor(logging.DEBUG):
                log.debug("No batch found to upload from queue '{:}'".format(queue))

            return ret

        # Upload batch
        payload = self._prepare_payload_for(batch)
        ok, msg = self._upload(payload)  # Remember this call will raise exception upon server error
        if ok:
            log.info("Uploaded batch with {:} entries from queue '{:}'".format(len(batch), queue))

            ret["count"] = len(batch)

            # Batch uploaded equals work completed
            self.client.pipeline() \
                .delete(work_queue) \
                .bgsave() \
                .execute()
        else:
            log.warning("Temporarily unable to upload batch with {:} entries from queue '{:}': {:}".format(len(batch), queue, msg))

            ret["error"] = msg

        return ret 
Example #16
Source File: queue.py    From daf-recipes with GNU General Public License v3.0
def queue_purge(self, queue=None):
        '''
        Purge the consumer's queue.

        The ``queue`` parameter exists only for compatibility and is
        ignored.
        '''
        # Use a script to make the operation atomic
        lua_code = b'''
            local routing_key = KEYS[1]
            local message_key = ARGV[1]
            local count = 0
            while true do
                local s = redis.call("lpop", routing_key)
                if s == false then
                    break
                end
                local value = cjson.decode(s)
                local id = value[message_key]
                local persistence_key = routing_key .. ":" .. id
                redis.call("del", persistence_key)
                count = count + 1
            end
            return count
        '''
        script = self.redis.register_script(lua_code)
        return script(keys=[self.routing_key], args=[self.message_key]) 
Example #17
Source File: ch11_listing_source.py    From https---github.com-josiahcarlson-redis-in-action with MIT License
def script_load(script):
    sha = [None]                #A
    def call(conn, keys=[], args=[], force_eval=False):   #B
        if not force_eval:
            if not sha[0]:   #C
                sha[0] = conn.execute_command(              #D
                    "SCRIPT", "LOAD", script, parse="LOAD") #D
    
            try:
                return conn.execute_command(                    #E
                    "EVALSHA", sha[0], len(keys), *(keys+args)) #E
        
            except redis.exceptions.ResponseError as msg:
                if not msg.args[0].startswith("NOSCRIPT"):      #F
                    raise                                       #F
        
        return conn.execute_command(                    #G
            "EVAL", script, len(keys), *(keys+args))    #G
    
    return call             #H
# <end id="script-load"/>
#A Store the cached SHA1 hash of the result of SCRIPT LOAD in a list so we can change it later from within the call() function
#B When calling the "loaded script", you must provide the connection, the set of keys that the script will manipulate, and any other arguments to the function
#C We will only try loading the script if we don't already have a cached SHA1 hash
#D Load the script if we don't already have the SHA1 hash cached
#E Execute the command from the cached SHA1
#F If the error was unrelated to a missing script, re-raise the exception
#G If we received a script-related error, or if we need to force-execute the script, directly execute the script, which will automatically cache the script on the server (with the same SHA1 that we've already cached) when done
#H Return the function that automatically loads and executes scripts when called
#END 
Example #18
Source File: config.py    From fomalhaut-panel with MIT License
def transfer_to_redis(request):
    """
    Synchronize the configuration data to Redis.
    """
    success, msg = False, ''
    try:
        config_data = get_config_redis_json()
        logger.debug(config_data)
        r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
                              db=settings.REDIS_DB, password=settings.REDIS_PASSWORD)

        # transaction=True is the default
        pipe = r.pipeline(transaction=True)
        # Bulk-delete keys matching a pattern
        pattern_delete_lua = """
        local keys = redis.call('keys', ARGV[1])
        for i = 1, table.getn(keys) do
            redis.call('del', keys[i])
        end
        """
        pattern_delete = r.register_script(pattern_delete_lua)
        pattern_delete(keys=[''], args=['%s:*' % settings.CLIENT_CONFIG_REDIS_PREFIX], client=pipe)

        for t in config_data:
            logger.debug(t)
            #
            # client = {}
            # for k, v in t.iteritems():
            #     if k != 'endpoints':
            #         client[k] = v
            pipe.set('%s:%s' % (settings.CLIENT_CONFIG_REDIS_PREFIX, t['app_id']), json_dumps(t))

            # for s in t['endpoints']:
            #     pipe.set('%s:%s:%s:%s' % (settings.PROXY_CONFIG_REDIS_PREFIX, t['access_key'], s['name'], s['version']),
            #              json_dumps(s))
        # pipe.delete('config:*')

        # the EXECUTE call sends all buffered commands to the server, returning
        # a list of responses, one for each command.
        pipe.execute()
        success = True
    except Exception as e:
        msg = 'Exception while synchronizing configuration data to Redis'
        logger.error(e.message)
        logger.error(traceback.format_exc())

    return http_response_json({'success': success, 'msg': msg}) 
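Passing client=pipe when invoking the registered script buffers the EVALSHA on the pipeline together with the subsequent SET commands, so the pattern delete and the re-population are sent as one MULTI/EXEC transaction. A minimal hedged sketch of the same technique (prefix and payload are illustrative):

import redis

r = redis.StrictRedis()

pattern_delete = r.register_script("""
local keys = redis.call('keys', ARGV[1])
for i = 1, #keys do
    redis.call('del', keys[i])
end
return #keys
""")

pipe = r.pipeline(transaction=True)
pattern_delete(keys=[''], args=['config:*'], client=pipe)  # buffered, not yet sent
pipe.set('config:app1', '{"app_id": "app1"}')
print(pipe.execute())  # one MULTI/EXEC round trip: [deleted_count, True]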
Example #19
Source File: queue.py    From daf-recipes with GNU General Public License v3.0
def fetch_and_import_stages(harvester, obj):
    obj.fetch_started = datetime.datetime.utcnow()
    obj.state = "FETCH"
    obj.save()
    success_fetch = harvester.fetch_stage(obj)
    obj.fetch_finished = datetime.datetime.utcnow()
    obj.save()
    if success_fetch is True:
        # If no errors were found, call the import method
        obj.import_started = datetime.datetime.utcnow()
        obj.state = "IMPORT"
        obj.save()
        success_import = harvester.import_stage(obj)
        obj.import_finished = datetime.datetime.utcnow()
        if success_import:
            obj.state = "COMPLETE"
            if success_import == 'unchanged':
                obj.report_status = 'not modified'
                obj.save()
                return
        else:
            obj.state = "ERROR"
        obj.save()
    elif success_fetch == 'unchanged':
        obj.state = 'COMPLETE'
        obj.report_status = 'not modified'
        obj.save()
        return
    else:
        obj.state = "ERROR"
        obj.save()
    if obj.state == 'ERROR':
        obj.report_status = 'errored'
    elif obj.current == False:
        obj.report_status = 'deleted'
    elif len(model.Session.query(HarvestObject)
           .filter_by(package_id = obj.package_id)
           .limit(2)
           .all()) == 2:
        obj.report_status = 'updated'
    else:
        obj.report_status = 'added'
    obj.save()