Python sqlalchemy.and_() Examples

The following are 30 code examples of sqlalchemy.and_(). Each example is taken from the open-source project and source file noted above it. You may also want to check out the other available functions and classes of the sqlalchemy module.
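Before the project examples, here is a minimal, self-contained sketch of what sqlalchemy.and_() does: it joins several filter criteria with SQL AND. The User model and in-memory SQLite engine below are illustrative assumptions (SQLAlchemy 1.4+ style), not taken from any of the projects listed:

from sqlalchemy import Column, Integer, String, and_, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    # hypothetical model, used only for illustration
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    age = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([User(name='alice', age=30), User(name='bob', age=17)])
    session.commit()
    # and_() combines both criteria with SQL AND; passing several
    # criteria to filter() directly would have the same effect.
    adults_named_alice = (session.query(User)
                          .filter(and_(User.name == 'alice', User.age >= 18))
                          .all())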
Example #1
Source File: rse.py    From rucio with Apache License 2.0
def get_rse(rse_id, session=None):
    """
    Get a RSE or raise if it does not exist.

    :param rse_id:  The rse id.
    :param session: The database session in use.

    :raises RSENotFound: If referred RSE was not found in the database.
    """

    false_value = False  # To make pep8 checker happy ...
    try:
        tmp = session.query(models.RSE).\
            filter(sqlalchemy.and_(models.RSE.deleted == false_value,
                                   models.RSE.id == rse_id))\
            .one()
        tmp['type'] = tmp.rse_type
        return tmp
    except sqlalchemy.orm.exc.NoResultFound:
        raise exception.RSENotFound('RSE with id \'%s\' cannot be found' % rse_id) 
Example #2
Source File: replica.py    From rucio with Apache License 2.0
def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None):
    """
    Sets a tombstone on a replica.

    :param rse_id: ID of RSE.
    :param scope: scope of the replica DID.
    :param name: name of the replica DID.
    :param tombstone: the tombstone to set. Default is OBSOLETE
    :param session: database session in use.
    """
    stmt = update(models.RSEFileAssociation).where(and_(models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope,
                                                        ~session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rse_id=rse_id).exists()))\
                                            .values(tombstone=tombstone)
    result = session.execute(stmt)
    if not result.rowcount:
        try:
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one()
            raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
        except NoResultFound:
            raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) 
Example #3
Source File: fetcher.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def _next_prev_query(self, obj, next_or_prev='next'):
        session = sa.orm.object_session(obj)

        return (
            session.query(obj.__class__)
            .filter(
                sa.and_(
                    getattr(
                        obj.__class__,
                        tx_column_name(obj)
                    )
                    ==
                    self._transaction_id_subquery(
                        obj, next_or_prev=next_or_prev
                    ),
                    *parent_criteria(obj)
                )
            )
        ) 
Example #4
Source File: operators.py    From jbox with MIT License
def __and__(self, other):
        """Implement the ``&`` operator.

        When used with SQL expressions, results in an
        AND operation, equivalent to
        :func:`~.expression.and_`, that is::

            a & b

        is equivalent to::

            from sqlalchemy import and_
            and_(a, b)

        Care should be taken when using ``&`` regarding
        operator precedence; the ``&`` operator has the highest precedence.
        The operands should be enclosed in parenthesis if they contain
        further sub expressions::

            (a == 2) & (b == 4)

        """
        return self.operate(and_, other) 
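A quick illustration of the equivalence and the precedence caveat described in the docstring above, using hypothetical column() expressions:

from sqlalchemy import and_, column

# hypothetical columns, used only for illustration
a = column('a')
b = column('b')

# Both expressions render the same SQL: a = :a_1 AND b = :b_1
expr1 = and_(a == 2, b == 4)
expr2 = (a == 2) & (b == 4)

# Without the parentheses, Python applies `&` before `==`, so
# `a == 2 & b == 4` would be parsed as `a == (2 & b) == 4`
# instead of the intended conjunction.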
Example #5
Source File: search_engine.py    From houndsploit with BSD 3-Clause "New" or "Revised" License
def search_shellcodes_version(software_name, num_version):
    """
    Perform a search based on shellcodes' descriptions when the input search contains a version number.
    This function is called by the 'search_vulnerabilities_version' method.
    :param software_name: the name of the software that the user is searching for.
    :param num_version: the specific number of version the user is searching for.
    :return: a list with the search results found in the shellcodes table.
    """
    session = start_session()
    queryset = session.query(Shellcode).filter(and_(Shellcode.description.contains(software_name)))
    query_result_set = queryset2list(queryset)
    session.close()
    # limit the time spent for searching useless results.
    if queryset.count() > N_MAX_RESULTS_NUMB_VERSION:
        # return Exploit.objects.none()
        return void_result_set()
    final_result_set = []
    for shellcode in query_result_set:
        # if the shellcode description does not contain '<'
        if not str(shellcode.description).__contains__('<'):
            final_result_set = filter_shellcodes_without_comparator(shellcode, num_version, software_name, final_result_set)
        # if the shellcode description contains '<'
        else:
            final_result_set = filter_shellcodes_with_comparator(shellcode, num_version, software_name, final_result_set)
    return final_result_set 
Example #6
Source File: search_engine.py    From houndsploit with BSD 3-Clause "New" or "Revised" License
def search_exploits_version(software_name, num_version):
    """
    Perform a search based on exploits' descriptions when the input search contains a version number.
    This function is called by the 'search_vulnerabilities_version' method.
    :param software_name: the name of the software that the user is searching for.
    :param num_version: the specific number of version the user is searching for.
    :return: a queryset with search result found in 'searcher_exploit' DB table.
    """
    session = start_session()
    queryset = session.query(Exploit).filter(and_(Exploit.description.contains(software_name)))
    query_result_set = queryset2list(queryset)
    session.close()
    # limit the time spent for searching useless results.
    if queryset.count() > N_MAX_RESULTS_NUMB_VERSION:
        return void_result_set()
    final_result_set = []
    for exploit in query_result_set:
        # if the exploit description does not contain '<'
        if not str(exploit.description).__contains__('<'):
            final_result_set = filter_exploits_without_comparator(exploit, num_version, software_name, final_result_set)
        # if the exploit description contains '<'
        else:
            final_result_set = filter_exploits_with_comparator(exploit, num_version, software_name, final_result_set)
    return final_result_set 
Example #7
Source File: search_engine.py    From houndsploit with BSD 3-Clause "New" or "Revised" License
def search_vulnerabilities_for_author(word_list, db_table):
    """
    Search vulnerabilities by author.
    :param word_list: the list of words searched by the user.
    :param db_table: the database table in which to perform the search.
    :return: the list containing the results of the performed search.
    """
    session = start_session()

    if db_table == 'searcher_exploit':
        queryset = session.query(Exploit).filter(and_(Exploit.author.contains(word) for word in word_list))
    else:
        queryset = session.query(Shellcode).filter(
            and_(Shellcode.author.contains(word) for word in word_list))

    session.close()
    return queryset2list(queryset) 
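The houndsploit helpers in Examples #7, #8 and #10 build one contains() criterion per search word and hand them all to and_(). A conservative, hypothetical sketch of that pattern, collecting the criteria in a list and unpacking it with *:

from sqlalchemy import and_

def author_filter(model, word_list):
    # one criterion per word; unpacking the list means every word must match
    criteria = [model.author.contains(word) for word in word_list]
    return and_(*criteria)

# hypothetical usage, assuming a mapped Exploit class with an `author` column:
# session.query(Exploit).filter(author_filter(Exploit, ['john', 'doe'])).all()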
Example #8
Source File: search_engine.py    From houndsploit with BSD 3-Clause "New" or "Revised" License
def search_vulnerabilities_for_file(word_list, db_table):
    """
    Search vulnerabilities by file.
    :param word_list: the list of words searched by the user.
    :param db_table: the database table in which to perform the search.
    :return: the list containing the results of the performed search.
    """
    session = start_session()

    if db_table == 'searcher_exploit':
        queryset = session.query(Exploit).filter(and_(Exploit.file.contains(word) for word in word_list))
    else:
        queryset = session.query(Shellcode).filter(
            and_(Shellcode.file.contains(word) for word in word_list))

    session.close()
    return queryset2list(queryset) 
Example #9
Source File: interface.py    From SempoBlockchain with GNU General Public License v3.0
def _unconsume_high_failed_nonces(self, signing_wallet_id, stating_nonce):
        expire_time = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=self.PENDING_TRANSACTION_EXPIRY_SECONDS
        )

        highest_known_success = (self.session.query(BlockchainTransaction)
                                 .filter(and_(BlockchainTransaction.signing_wallet_id == signing_wallet_id,
                                              BlockchainTransaction.status == 'SUCCESS'))
                                 .order_by(BlockchainTransaction.id.desc()).first()
                                 )

        if highest_known_success:
            highest_known_nonce = highest_known_success.nonce or 0
        else:
            highest_known_nonce = 0

        nonce = max(stating_nonce, highest_known_nonce)

        (self.session.query(BlockchainTransaction)
         .filter(and_(BlockchainTransaction.signing_wallet_id == signing_wallet_id,
                      BlockchainTransaction.status == 'FAILED',
                      BlockchainTransaction.nonce > nonce,
                      BlockchainTransaction.submitted_date < expire_time))
         .update({BlockchainTransaction.nonce_consumed: False},
                 synchronize_session=False)) 
Example #10
Source File: search_engine.py    From houndsploit with BSD 3-Clause "New" or "Revised" License
def search_vulnerabilities_for_description(word_list, db_table):
    """
    Search vulnerabilities by description.
    :param word_list: the list of words searched by the user.
    :param db_table: the database table in which to perform the search.
    :return: the list containing the results of the performed search.
    """
    session = start_session()

    if db_table == 'searcher_exploit':
        queryset = session.query(Exploit).filter(and_(Exploit.description.contains(word) for word in word_list))
    else:
        queryset = session.query(Shellcode).filter(
            and_(Shellcode.description.contains(word) for word in word_list))

    session.close()
    return queryset2list(queryset) 
Example #11
Source File: fetcher.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def next_query(self, obj):
        """
        Returns the query that fetches the next version relative to this
        version in the version history.
        """
        session = sa.orm.object_session(obj)

        return (
            session.query(obj.__class__)
            .filter(
                sa.and_(
                    getattr(obj.__class__, tx_column_name(obj))
                    ==
                    getattr(obj, end_tx_column_name(obj)),
                    *parent_criteria(obj)
                )
            )
        ) 
Example #12
Source File: fetcher.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def previous_query(self, obj):
        """
        Returns the query that fetches the previous version relative to this
        version in the version history.
        """
        session = sa.orm.object_session(obj)

        return (
            session.query(obj.__class__)
            .filter(
                sa.and_(
                    getattr(obj.__class__, end_tx_column_name(obj))
                    ==
                    getattr(obj, tx_column_name(obj)),
                    *parent_criteria(obj)
                )
            )
        ) 
Example #13
Source File: temporary_did.py    From rucio with Apache License 2.0
def delete_temporary_dids(dids, session=None):
    """
    Delete temporary file replicas.

    :param dids: the list of files to delete.
    :param session: the database session in use.
    """
    where_clause = []
    for did in dids:
        where_clause.append(and_(models.TemporaryDataIdentifier.scope == did['scope'],
                                 models.TemporaryDataIdentifier.name == did['name']))

    if where_clause:
        return session.query(models.TemporaryDataIdentifier).\
            with_hint(models.TemporaryDataIdentifier, "INDEX(tmp_dids TMP_DIDS_PK)", 'oracle').\
            filter(or_(*where_clause)).delete(synchronize_session=False)
    return 
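Example #13 pairs and_() with or_() so that any of several (scope, name) pairs match in a single statement. A minimal sketch of that composite-filter pattern, with hypothetical column() expressions and values:

from sqlalchemy import and_, or_, column

# hypothetical columns standing in for the model attributes
scope = column('scope')
name = column('name')

pairs = [('scope_a', 'file_1'), ('scope_b', 'file_2')]

# one and_() per pair, all of them combined with or_(), i.e.
# (scope = 'scope_a' AND name = 'file_1') OR (scope = 'scope_b' AND name = 'file_2')
clause = or_(*[and_(scope == s, name == n) for s, n in pairs])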
Example #14
Source File: request.py    From rucio with Apache License 2.0
def release_waiting_requests_per_deadline(rse_id=None, deadline=1, session=None):
    """
    Release waiting requests that have been waiting longer than the maximum allowed waiting time.
    If the DID of a request is attached to a dataset, the oldest requested_at date of all requests related to the dataset will be used for checking and all requests of this dataset will be released.
    :param rse_id:           The source RSE id.
    :param deadline:         Maximal waiting time in hours until a dataset gets released.
    :param session:          The database session.
    """
    amount_released_requests = 0
    if deadline:
        grouped_requests_subquery, filtered_requests_subquery = create_base_query_grouped_fifo(rse_id, filter_by_rse='source', session=session)
        old_requests_subquery = session.query(grouped_requests_subquery.c.name,
                                              grouped_requests_subquery.c.scope,
                                              grouped_requests_subquery.c.oldest_requested_at)\
                                       .filter(grouped_requests_subquery.c.oldest_requested_at < datetime.datetime.now() - datetime.timedelta(hours=deadline))\
                                       .subquery()
        old_requests_subquery = session.query(filtered_requests_subquery.c.id)\
                                       .join(old_requests_subquery, and_(filtered_requests_subquery.c.dataset_name == old_requests_subquery.c.name, filtered_requests_subquery.c.dataset_scope == old_requests_subquery.c.scope))
        old_requests_subquery = old_requests_subquery.subquery()
        statement = update(models.Request).where(models.Request.id.in_(old_requests_subquery)).values(state=RequestState.QUEUED)
        amount_released_requests = session.execute(statement).rowcount
    return amount_released_requests 
Example #15
Source File: quote.py    From zvt with MIT License
def data_exist(session, schema, id):
    return session.query(exists().where(and_(schema.id == id))).scalar() 
Example #16
Source File: api.py    From zvt with MIT License
def data_exist(session, schema, id):
    return session.query(exists().where(and_(schema.id == id))).scalar() 
Example #17
Source File: test.py    From ACE with Apache License 2.0
def test_automation_cleanup(self):
        
        # make sure a lock uuid is created
        start_remediation_system_manager()
        stop_remediation_system_manager()

        # insert a new work request
        remediation_id = request_remediation(REMEDIATION_TYPE_EMAIL, '<message_id>', '<recipient@localhost>', 
                                             user_id=saq.test.UNITTEST_USER_ID, company_id=saq.COMPANY_ID)

        # pretend it started processing
        saq.db.execute(Remediation.__table__.update().values(
            lock=self.system.lock,
            lock_time=func.now(),
            status=REMEDIATION_STATUS_IN_PROGRESS).where(and_(
            Remediation.company_id == saq.COMPANY_ID,
            Remediation.lock == None,
            Remediation.status == REMEDIATION_STATUS_NEW)))
        saq.db.commit()

        # start up the system again
        start_remediation_system_manager()

        # and it should process that job
        wait_for(
            lambda: len(saq.db.query(Remediation).filter(
                Remediation.id == remediation_id, 
                Remediation.status == REMEDIATION_STATUS_COMPLETED).all()) > 0,
            1, 5)

        stop_remediation_system_manager() 
Example #18
Source File: repo_request.py    From oadoi with MIT License
def matching_repositories(self):

        response = self.repositories

        if not self.institution_name or not self.repo_name:
            return response

        matching_query = Repository.query.filter(and_(
            Repository.institution_name.ilike(u"%{}%".format(self.institution_name)),
            Repository.repository_name.ilike(u"%{}%".format(self.repo_name))))
        hits = matching_query.all()
        if hits:
            response += hits
        return response 
Example #19
Source File: database.py    From ACE with Apache License 2.0
def remove_observable_tag_mapping(o_type, o_value, o_md5, tag):
    """Removes the given observable tag mapping specified by type, and md5 (hex string) and the tag you want to remove.
       Returns True if the removal was successful, False otherwise."""

    tag = saq.db.query(saq.database.Tag).filter(saq.database.Tag.name == tag).first()
    if tag is None:
        return False

    observable = None
    if o_md5 is not None:
        observable = saq.db.query(saq.database.Observable).filter(saq.database.Observable.type == o_type,
                                                                  saq.database.Observable.md5 == func.UNHEX(o_md5)).first()
    
    if observable is None:
        if o_value is None:
            return False

        from saq.observables import create_observable
        o = create_observable(o_type, o_value)
        observable = saq.db.query(saq.database.Observable).filter(saq.database.Observable.type == o.type,
                                                                  saq.database.Observable.md5 == func.UNHEX(o.md5_hex)).first()

    if observable is None:
        return False

    saq.db.execute(ObservableTagMapping.__table__.delete().where(and_(ObservableTagMapping.observable_id == observable.id,
                                                                 ObservableTagMapping.tag_id == tag.id)))
    saq.db.commit()
    return True

# this is used to map what observables had what tags in what alerts
# not to be confused with ObservableTagMapping (see above)
# I think this is what I had in mind when I originally created ObservableTagMapping
# but I was missing the alert_id field
# that table was later repurposed to automatically map tags to observables 
Example #20
Source File: index.py    From arch-security-tracker with MIT License
def get_index_data(only_vulnerable=False, only_in_repo=True):
    select = (db.session.query(CVEGroup, CVE, func.group_concat(CVEGroupPackage.pkgname, ' '),
                               func.group_concat(Advisory.id, ' '))
                        .join(CVEGroupEntry, CVEGroup.issues)
                        .join(CVE, CVEGroupEntry.cve)
                        .join(CVEGroupPackage, CVEGroup.packages)
                        .outerjoin(Advisory, and_(Advisory.group_package_id == CVEGroupPackage.id,
                                                  Advisory.publication == Publication.published)))
    if only_vulnerable:
        select = select.filter(CVEGroup.status.in_([Status.unknown, Status.vulnerable, Status.testing]))
    if only_in_repo:
        select = select.join(Package, Package.name == CVEGroupPackage.pkgname)

    entries = (select.group_by(CVEGroup.id).group_by(CVE.id)
                     .order_by(CVEGroup.status.desc())
                     .order_by(CVEGroup.changed.desc())).all()

    groups = defaultdict(defaultdict)
    for group, cve, pkgs, advisories in entries:
        group_entry = groups.setdefault(group.id, {})
        group_entry['group'] = group
        group_entry['pkgs'] = list(set(pkgs.split(' ')))
        group_entry['advisories'] = advisories.split(' ') if advisories else []
        group_entry.setdefault('issues', []).append(cve)

    for key, group in groups.items():
        group['issues'] = sorted(group['issues'], key=lambda item: item.id, reverse=True)

    groups = groups.values()
    groups = sorted(groups, key=lambda item: item['group'].changed, reverse=True)
    groups = sorted(groups, key=lambda item: item['group'].severity)
    groups = sorted(groups, key=lambda item: item['group'].status)
    return groups 
Example #21
Source File: interface.py    From SempoBlockchain with GNU General Public License v3.0
def _fail_expired_transactions(self):
        expire_time = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=self.PENDING_TRANSACTION_EXPIRY_SECONDS
        )

        (self.session.query(BlockchainTransaction)
         .filter(and_(BlockchainTransaction.status == 'PENDING',
                      BlockchainTransaction.updated < expire_time))
         .update({BlockchainTransaction.status: 'FAILED',
                  BlockchainTransaction.error: 'Timeout Error'},
                 synchronize_session=False)) 
Example #22
Source File: request.py    From rucio with Apache License 2.0
def update_requests_priority(priority, filter, session=None):
    """
    Update priority of requests.

    :param priority:  The priority as an integer from 1 to 5.
    :param filter:    Dictionary such as {'rule_id': rule_id, 'request_id': request_id, 'older_than': time_stamp, 'activities': [activities]}.
    """
    try:
        query = session.query(models.Request.id, models.Request.external_id, models.Request.external_host)\
            .join(models.ReplicaLock, and_(models.ReplicaLock.scope == models.Request.scope,
                                           models.ReplicaLock.name == models.Request.name,
                                           models.ReplicaLock.rse_id == models.Request.dest_rse_id))\
            .filter(models.Request.state == RequestState.SUBMITTED,
                    models.ReplicaLock.state == LockState.REPLICATING)
        if 'rule_id' in filter:
            query = query.filter(models.ReplicaLock.rule_id == filter['rule_id'])
        if 'request_id' in filter:
            query = query.filter(models.Request.id == filter['request_id'])
        if 'older_than' in filter:
            query = query.filter(models.Request.created_at < filter['older_than'])
        if 'activities' in filter:
            if type(filter['activities']) is not list:
                filter['activities'] = filter['activities'].split(',')
            query = query.filter(models.Request.activity.in_(filter['activities']))

        transfertool_map = {}
        for item in query.all():
            try:
                if item[2] not in transfertool_map:
                    transfertool_map[item[2]] = FTS3Transfertool(external_host=item[2])
                res = transfertool_map[item[2]].update_priority(transfer_id=item[1], priority=priority)
            except Exception:
                logging.debug("Failed to boost request %s priority: %s" % (item[0], traceback.format_exc()))
            else:
                logging.debug("Update request %s priority to %s: %s" % (item[0], priority, res['http_message']))
    except IntegrityError as error:
        raise RucioException(error.args) 
Example #23
Source File: interface.py    From SempoBlockchain with GNU General Public License v3.0
def _calculate_nonce(self, signing_wallet_obj, starting_nonce=0):

        self._unconsume_high_failed_nonces(signing_wallet_obj.id, starting_nonce)
        self._fail_expired_transactions()

        # First find the highest *continuous* nonce that isn't either pending, or consumed
        # (failed or succeeded on blockchain)

        likely_consumed_nonces = (
            self.session.query(BlockchainTransaction)
                .filter(BlockchainTransaction.signing_wallet == signing_wallet_obj)
                .filter(BlockchainTransaction.ignore == False)
                .filter(BlockchainTransaction.first_block_hash == self.first_block_hash)
                .filter(
                    and_(
                        or_(BlockchainTransaction.status == 'PENDING',
                            BlockchainTransaction.nonce_consumed == True),
                        BlockchainTransaction.nonce >= starting_nonce
                    )
                )
                .all())

        # Use a set to find continuous nonces because txns in db may be out of order
        nonce_set = set()
        for txn in likely_consumed_nonces:
            nonce_set.add(txn.nonce)

        next_nonce = starting_nonce
        while next_nonce in nonce_set:
            next_nonce += 1

        return next_nonce 
Example #24
Source File: test.py    From ACE with Apache License 2.0
def test_automation_cleanup(self):

        from saq.database import Remediation
        
        # make sure a lock uuid is created
        manager = initialize_remediation_system_manager()
        system = manager.systems['test']
        start_remediation_system_manager()
        stop_remediation_system_manager()

        # insert a new work request
        remediation_id = system.request_remediation('<message_id>', '<recipient@localhost>', 
                                                    user_id=saq.test.UNITTEST_USER_ID, company_id=saq.COMPANY_ID)

        # pretend it started processing
        saq.db.execute(Remediation.__table__.update().values(
            lock=system.lock,
            lock_time=func.now(),
            status=REMEDIATION_STATUS_IN_PROGRESS).where(and_(
            Remediation.company_id == saq.COMPANY_ID,
            Remediation.lock == None,
            Remediation.status == REMEDIATION_STATUS_NEW)))
        saq.db.commit()

        # start up the system again
        manager = initialize_remediation_system_manager()
        start_remediation_system_manager()

        # and it should process that job
        wait_for(
            lambda: len(saq.db.query(Remediation).filter(
                Remediation.id == remediation_id, 
                Remediation.status == REMEDIATION_STATUS_COMPLETED).all()) > 0,
            1, 5)

        stop_remediation_system_manager() 
Example #25
Source File: fetcher.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def _index_query(self, obj):
        """
        Returns the query needed for fetching the index of this record relative
        to version history.
        """
        alias = sa.orm.aliased(obj)

        subquery = (
            sa.select([sa.func.count('1')], from_obj=[alias.__table__])
            .where(
                getattr(alias, tx_column_name(obj))
                <
                getattr(obj, tx_column_name(obj))
            )
            .correlate(alias.__table__)
            .label('position')
        )
        query = (
            sa.select([subquery], from_obj=[obj.__table__])
            .where(
                sa.and_(*eqmap(identity, (obj.__class__, obj)))
            )
            .order_by(
                getattr(obj.__class__, tx_column_name(obj))
            )
        )
        return query 
Example #26
Source File: fetcher.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def _transaction_id_subquery(self, obj, next_or_prev='next', alias=None):
        if next_or_prev == 'next':
            op = operator.gt
            func = sa.func.min
        else:
            op = operator.lt
            func = sa.func.max

        if alias is None:
            alias = sa.orm.aliased(obj)
            table = alias.__table__
            if hasattr(alias, 'c'):
                attrs = alias.c
            else:
                attrs = alias
        else:
            table = alias.original
            attrs = alias.c
        query = (
            sa.select(
                [func(
                    getattr(attrs, tx_column_name(obj))
                )],
                from_obj=[table]
            )
            .where(
                sa.and_(
                    op(
                        getattr(attrs, tx_column_name(obj)),
                        getattr(obj, tx_column_name(obj))
                    ),
                    *[
                        getattr(attrs, pk) == getattr(obj, pk)
                        for pk in get_primary_keys(obj.__class__)
                        if pk != tx_column_name(obj)
                    ]
                )
            )
            .correlate(table)
        )
        return query 
Example #27
Source File: oidc.py    From rucio with Apache License 2.0
def refresh_jwt_tokens(total_workers, worker_number, refreshrate=3600, limit=1000, session=None):
    """
    Refreshes tokens which have expired or will expire before the next run of this
    function (now + refreshrate) and which have a valid refresh token.

    :param total_workers:      Number of total workers.
    :param worker_number:      id of the executing worker.
    :param limit:              Maximum number of tokens to refresh per call.
    :param session:            Database session in use.

    :return: number of tokens refreshed
    """
    nrefreshed = 0
    try:
        # get tokens for refresh that expire in the next <refreshrate> seconds
        expiration_future = datetime.utcnow() + timedelta(seconds=refreshrate)
        query = session.query(models.Token.token) \
                       .filter(and_(models.Token.refresh == true(),
                                    models.Token.refresh_expired_at > datetime.utcnow(),
                                    models.Token.expired_at < expiration_future))\
                       .order_by(models.Token.expired_at)
        query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='token')

        # limiting the number of tokens for refresh
        filtered_tokens_query = query.limit(limit)
        filtered_tokens = []
        filtered_bunches = query_bunches(filtered_tokens_query, 10)
        for items in filtered_bunches:
            filtered_tokens += session.query(models.Token).filter(models.Token.token.in_(items)).with_for_update(skip_locked=True).all()

        # refreshing these tokens
        for token in filtered_tokens:
            new_token = __refresh_token_oidc(token, session=session)
            if new_token:
                nrefreshed += 1

    except Exception as error:
        raise RucioException(error.args)

    return nrefreshed 
Example #28
Source File: authentication.py    From rucio with Apache License 2.0
def delete_expired_tokens(total_workers, worker_number, limit=1000, session=None):
    """
    Delete expired tokens.

    :param total_workers:      Number of total workers.
    :param worker_number:      id of the executing worker.
    :param limit:              Maximum number of tokens to delete.
    :param session:            Database session in use.

    :returns: number of deleted rows
    """

    # get expired tokens
    try:
        # delete all expired tokens except tokens which have refresh token that is still valid
        query = session.query(models.Token.token).filter(and_(models.Token.expired_at <= datetime.datetime.utcnow()))\
                                                 .filter(or_(models.Token.refresh_expired_at.__eq__(None),
                                                             models.Token.refresh_expired_at <= datetime.datetime.utcnow()))\
                                                 .order_by(models.Token.expired_at)

        query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='token')

        # limiting the number of tokens deleted at once
        filtered_tokens_query = query.limit(limit)
        # remove expired tokens
        deleted_tokens = 0
        filtered_bunches = query_bunches(filtered_tokens_query, 10)
        for items in filtered_bunches:
            deleted_tokens += session.query(models.Token.token)\
                                     .filter(models.Token.token.in_(items))\
                                     .with_for_update(skip_locked=True)\
                                     .delete(synchronize_session='fetch')

    except Exception as error:
        raise RucioException(error.args)

    return deleted_tokens 
Example #29
Source File: quarantined_replica.py    From rucio with Apache License 2.0
def list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):
    """
    List RSE Quarantined File replicas.

    :param rse_id: the rse id.
    :param limit: The maximum number of replicas returned.
    :param worker_number:      id of the executing worker.
    :param total_workers:      Number of total workers.
    :param session: The database session in use.

    :returns: a list of dictionary replica.
    """

    query = session.query(models.QuarantinedReplica.path,
                          models.QuarantinedReplica.bytes,
                          models.QuarantinedReplica.scope,
                          models.QuarantinedReplica.name,
                          models.QuarantinedReplica.created_at).\
        filter(models.QuarantinedReplica.rse_id == rse_id)

    # do not delete valid replicas
    stmt = exists(select([1]).prefix_with("/*+ index(REPLICAS REPLICAS_PK) */", dialect='oracle')).\
        where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,
                   models.RSEFileAssociation.name == models.QuarantinedReplica.name,
                   models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))
    query = query.filter(not_(stmt))
    query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='path')
    return [{'path': path,
             'rse_id': rse_id,
             'created_at': created_at,
             'scope': scope,
             'name': name,
             'bytes': bytes}
            for path, bytes, scope, name, created_at in query.limit(limit)] 
Example #30
Source File: sqlalchemy.py    From telethon-session-sqlalchemy with MIT License
def has_session(self, session_id: str) -> bool:
        if self.core_mode:
            t = self.Session.__table__
            rows = self.db_engine.execute(select([func.count(t.c.auth_key)])
                                          .where(and_(t.c.session_id == session_id,
                                                      t.c.auth_key != b'')))
            try:
                count, = next(rows)
                return count > 0
            except StopIteration:
                return False
        else:
            return self.Session.query.filter(self.Session.session_id == session_id).count() > 0