Python oslo_db.exception.DBReferenceError() Examples

The following are 25 code examples of oslo_db.exception.DBReferenceError(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module oslo_db.exception, or try the search function.
Example #1
Source File: sqlalchemy.py    From gnocchi with Apache License 2.0 6 votes vote down vote up
def create_archive_policy_rule(self, name, metric_pattern,
                                   archive_policy_name):
        """Persist a new archive policy rule and return it.

        Raises indexer.ArchivePolicyRuleAlreadyExists when a rule with
        the same name is already stored, and indexer.NoSuchArchivePolicy
        when the referenced archive policy does not exist.
        """
        rule = ArchivePolicyRule(name=name,
                                 archive_policy_name=archive_policy_name,
                                 metric_pattern=metric_pattern)
        try:
            with self.facade.writer() as session:
                session.add(rule)
        except exception.DBDuplicateEntry:
            raise indexer.ArchivePolicyRuleAlreadyExists(name)
        except exception.DBReferenceError as e:
            # The FK on archive_policy_name failed: the policy is missing.
            if e.constraint != 'fk_apr_ap_name_ap_name':
                raise
            raise indexer.NoSuchArchivePolicy(archive_policy_name)
        return rule
Example #2
Source File: sqlalchemy.py    From gnocchi with Apache License 2.0 6 votes vote down vote up
def create_metric(self, id, creator, archive_policy_name,
                      name=None, unit=None, resource_id=None):
        """Store a new metric row and return it.

        Raises indexer.NamedMetricAlreadyExists on a duplicate name, and
        indexer.NoSuchArchivePolicy / indexer.NoSuchResource when a
        foreign-key reference cannot be satisfied.
        """
        metric = Metric(id=id,
                        creator=creator,
                        archive_policy_name=archive_policy_name,
                        name=name,
                        unit=unit,
                        resource_id=resource_id)
        try:
            with self.facade.writer() as session:
                session.add(metric)
        except exception.DBDuplicateEntry:
            raise indexer.NamedMetricAlreadyExists(name)
        except exception.DBReferenceError as e:
            # Translate the violated constraint into the matching error.
            if e.constraint == 'fk_metric_ap_name_ap_name':
                raise indexer.NoSuchArchivePolicy(archive_policy_name)
            elif e.constraint == 'fk_metric_resource_id_resource_id':
                raise indexer.NoSuchResource(resource_id)
            raise
        return metric
Example #3
Source File: products.py    From refstack with Apache License 2.0 6 votes vote down vote up
def delete(self, id, version_id):
        """Delete a product version.

        Endpoint: /v1/products/<product_id>/versions/<version_id>
        """
        # Only a product admin or a foundation admin may delete versions.
        allowed = (api_utils.check_user_is_product_admin(id)
                   or api_utils.check_user_is_foundation_admin())
        if not allowed:
            pecan.abort(403, 'Forbidden.')
        try:
            version = db.get_product_version(version_id,
                                             allowed_keys=['version'])
            if not version['version']:
                pecan.abort(400, 'Can not delete the empty version as it is '
                                 'used for basic product/test association. '
                                 'This version was implicitly created with '
                                 'the product, and so it cannot be deleted '
                                 'explicitly.')

            db.delete_product_version(version_id)
        except DBReferenceError:
            pecan.abort(400, 'Unable to delete. There are still tests '
                             'associated to this product version.')
        pecan.response.status = 204
Example #4
Source File: test_deployable.py    From cyborg with Apache License 2.0 6 votes vote down vote up
def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
                                                   expected_exception):
        """Verify a DBReferenceError raised from any per-field
        ``_save_<field>`` method is translated by ``save()`` into
        *expected_exception*.
        """
        error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
                                        'key_table')
        # Prevent lazy-loading any fields, results in InstanceNotFound
        deployable = fake_deployable.fake_deployable_obj(self.context)
        fields_with_save_methods = [field for field in deployable.fields
                                    if hasattr(deployable, '_save_%s' % field)]
        for field in fields_with_save_methods:
            # Patch this field's saver to raise and pretend the field is
            # set, so save() takes the per-field save path.  The helper is
            # defined and invoked inside the same iteration, so the
            # late-binding of `field` is safe here.
            @mock.patch.object(deployable, '_save_%s' % field)
            @mock.patch.object(deployable, 'obj_attr_is_set')
            def _test(mock_is_set, mock_save_field):
                mock_is_set.return_value = True
                mock_save_field.side_effect = error
                deployable.obj_reset_changes(fields=[field])
                deployable._changed_fields.add(field)
                self.assertRaises(expected_exception, deployable.save)
                deployable.obj_reset_changes(fields=[field])
            _test()
Example #5
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise_delete(self):
        """Deleting a parent row that a child table still references must
        surface as DBReferenceError with parsed detail fields (MySQL).
        """
        with self.engine.connect() as conn:
            conn.execute(self.table_1.insert({"id": 1234, "foo": 42}))
            conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234}))
        matched = self.assertRaises(
            exception.DBReferenceError,
            self.engine.execute,
            self.table_1.delete()
        )
        # NOTE(jd) Cannot check precisely with assertInnerException since MySQL
        # errors are not the same depending on its version…
        self.assertIsInstance(matched.inner_exception,
                              sqlalchemy.exc.IntegrityError)
        # 1451 is MySQL ER_ROW_IS_REFERENCED_2 (cannot delete parent row).
        self.assertEqual(1451, matched.inner_exception.orig.args[0])
        self.assertEqual("resource_entity", matched.table)
        self.assertEqual("foo_fkey", matched.constraint)
        self.assertEqual("foo_id", matched.key)
        self.assertEqual("resource_foo", matched.key_table)
Example #6
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise_ansi_quotes(self):
        """FK violations must still be parsed when the MySQL session uses
        ANSI quoting (identifiers in "..." instead of backticks).
        """
        with self.engine.connect() as conn:
            conn.detach()  # will not be returned to the pool when closed

            # this is incompatible with some internals of the engine
            conn.execute("SET SESSION sql_mode = 'ANSI';")

            matched = self.assertRaises(
                exception.DBReferenceError,
                conn.execute,
                self.table_2.insert({'id': 1, 'foo_id': 2})
            )

        # NOTE(jd) Cannot check precisely with assertInnerException since MySQL
        # errors are not the same depending on its version…
        self.assertIsInstance(matched.inner_exception,
                              sqlalchemy.exc.IntegrityError)
        # 1452 is MySQL ER_NO_REFERENCED_ROW_2 (referenced parent missing).
        self.assertEqual(matched.inner_exception.orig.args[0], 1452)
        self.assertEqual("resource_entity", matched.table)
        self.assertEqual("foo_fkey", matched.constraint)
        self.assertEqual("foo_id", matched.key)
        self.assertEqual("resource_foo", matched.key_table)
Example #7
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise_delete(self):
        """Deleting a referenced parent row must raise DBReferenceError
        with all detail fields parsed from the PostgreSQL message.
        """
        with self.engine.connect() as conn:
            conn.execute(self.table_1.insert({"id": 1234, "foo": 42}))
            conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234}))
        matched = self.assertRaises(
            exception.DBReferenceError,
            self.engine.execute,
            self.table_1.delete()
        )
        # PostgreSQL error text is stable, so the inner exception can be
        # matched exactly (unlike the MySQL variant of this test).
        self.assertInnerException(
            matched,
            "IntegrityError",
            "update or delete on table \"resource_foo\" violates foreign key "
            "constraint \"foo_fkey\" on table \"resource_entity\"\n"
            "DETAIL:  Key (id)=(1234) is still referenced from "
            "table \"resource_entity\".\n",
            "DELETE FROM resource_foo",
            {},
        )

        self.assertEqual("resource_foo", matched.table)
        self.assertEqual("foo_fkey", matched.constraint)
        self.assertEqual("id", matched.key)
        self.assertEqual("resource_entity", matched.key_table)
Example #8
Source File: test_db_cleanup.py    From barbican with Apache License 2.0 6 votes vote down vote up
def test_db_cleanup_raise_integrity_error(self, project):
        """Test that an integrity error is thrown

        This test tests the invalid scenario where
        the secret meta was not marked for deletion during the secret deletion.
        We want to make sure an integrity error is thrown during clean up.
        """
        # create secret
        secret = _setup_entry('secret', project=project)
        secret_metadatum = _setup_entry('secret_metadatum', secret=secret)

        # delete parent but not child and assert integrity error
        secret.deleted = True
        secret_metadatum.deleted = False

        # Purging the parent secret while its metadatum still references
        # it must trip the foreign-key constraint.
        self.assertRaises(db_exc.DBReferenceError, clean.cleanup_all)
Example #9
Source File: exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def _foreign_key_error(integrity_error, match, engine_name, is_disconnect):
    """Filter for foreign key errors.

    Extract the optional named groups from the backend regex *match* and
    re-raise the original IntegrityError as a DBReferenceError carrying
    those details.

    :param integrity_error: the original DBAPI IntegrityError
    :param match: regex match object from the backend's FK error pattern
    :param engine_name: name of the database backend (unused here)
    :param is_disconnect: whether the error indicates a disconnect
        (unused here)
    :raises exception.DBReferenceError: always
    """
    def _group(name):
        # Not every backend's pattern defines every group; re raises
        # IndexError for an undefined group name, which we map to
        # "detail unavailable".
        try:
            return match.group(name)
        except IndexError:
            return None

    raise exception.DBReferenceError(_group("table"), _group("constraint"),
                                     _group("key"), _group("key_table"),
                                     integrity_error)
Example #10
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise(self):
        """An FK-violating insert on SQLite raises DBReferenceError, but
        SQLite's terse message carries no table/constraint/key details,
        so those attributes are all None.
        """
        # SQLite only enforces foreign keys when explicitly enabled.
        self.engine.execute("PRAGMA foreign_keys = ON;")

        matched = self.assertRaises(
            exception.DBReferenceError,
            self.engine.execute,
            self.table_2.insert({'id': 1, 'foo_id': 2})
        )

        self.assertInnerException(
            matched,
            "IntegrityError",
            "FOREIGN KEY constraint failed",
            'INSERT INTO resource_entity (id, foo_id) VALUES (?, ?)',
            (1, 2)
        )

        self.assertIsNone(matched.table)
        self.assertIsNone(matched.constraint)
        self.assertIsNone(matched.key)
        self.assertIsNone(matched.key_table)
Example #11
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise_delete(self):
        """Deleting a referenced parent row on SQLite raises
        DBReferenceError; SQLite gives no detail, so all parsed
        attributes are None.
        """
        # SQLite only enforces foreign keys when explicitly enabled.
        self.engine.execute("PRAGMA foreign_keys = ON;")

        with self.engine.connect() as conn:
            conn.execute(self.table_1.insert({"id": 1234, "foo": 42}))
            conn.execute(self.table_2.insert({"id": 4321, "foo_id": 1234}))
        matched = self.assertRaises(
            exception.DBReferenceError,
            self.engine.execute,
            self.table_1.delete()
        )
        self.assertInnerException(
            matched,
            "IntegrityError",
            "foreign key constraint failed",
            "DELETE FROM resource_foo",
            (),
        )

        self.assertIsNone(matched.table)
        self.assertIsNone(matched.constraint)
        self.assertIsNone(matched.key)
        self.assertIsNone(matched.key_table)
Example #12
Source File: test_exc_filters.py    From oslo.db with Apache License 2.0 6 votes vote down vote up
def test_raise(self):
        """Inserting a row whose FK points at a missing parent must raise
        DBReferenceError with details parsed from the PostgreSQL message.
        """
        params = {'id': 1, 'foo_id': 2}
        matched = self.assertRaises(
            exception.DBReferenceError,
            self.engine.execute,
            self.table_2.insert(params)
        )
        # PostgreSQL error text is stable, so it can be matched exactly.
        self.assertInnerException(
            matched,
            "IntegrityError",
            "insert or update on table \"resource_entity\" "
            "violates foreign key constraint \"foo_fkey\"\nDETAIL:  Key "
            "(foo_id)=(2) is not present in table \"resource_foo\".\n",
            "INSERT INTO resource_entity (id, foo_id) VALUES (%(id)s, "
            "%(foo_id)s)",
            params,
        )

        self.assertEqual("resource_entity", matched.table)
        self.assertEqual("foo_fkey", matched.constraint)
        self.assertEqual("foo_id", matched.key)
        self.assertEqual("resource_foo", matched.key_table)
Example #13
Source File: vendors.py    From refstack with Apache License 2.0 5 votes vote down vote up
def delete(self, vendor_id):
        """Delete vendor."""
        # Foundation admins or the vendor's own admins may delete it.
        is_admin = (api_utils.check_user_is_foundation_admin()
                    or api_utils.check_user_is_vendor_admin(vendor_id))
        if not is_admin:
            pecan.abort(403, 'Forbidden.')
        _check_is_not_foundation(vendor_id)

        try:
            db.delete_organization(vendor_id)
        except DBReferenceError:
            pecan.abort(400, 'Unable to delete. There are still tests '
                             'associated to products for this vendor.')
        pecan.response.status = 204
Example #14
Source File: sqlalchemy.py    From gnocchi with Apache License 2.0 5 votes vote down vote up
def _mark_as_deleting_resource_type(self, name):
        """Flip resource type *name* into the "deleting" state.

        Raises indexer.UnexpectedResourceTypeState when the type is in a
        transient state, and indexer.ResourceTypeInUse when resources
        (current or historical) still reference it.
        """
        try:
            with self.facade.writer() as session:
                rt = self._get_resource_type(session, name)
                if rt.state not in ["active", "deletion_error",
                                    "creation_error", "updating_error"]:
                    raise indexer.UnexpectedResourceTypeState(
                        name,
                        "active/deletion_error/creation_error/updating_error",
                        rt.state)
                session.delete(rt)

                # FIXME(sileht): Why do I need to flush here !!!
                # I want remove/add in the same transaction !!!
                session.flush()

                # NOTE(sileht): delete and recreate to:
                # * raise duplicate constraints
                # * ensure we do not create a new resource type
                #   with the same name while we destroy the tables next
                rt = ResourceType(name=rt.name,
                                  tablename=rt.tablename,
                                  state="deleting",
                                  attributes=rt.attributes)
                session.add(rt)
        except exception.DBReferenceError as e:
            # One of the FKs from resource tables to the type fired:
            # something still uses this resource type.
            if (e.constraint in [
                    'fk_resource_resource_type_name',
                    'fk_resource_history_resource_type_name',
                    'fk_rh_resource_type_name']):
                raise indexer.ResourceTypeInUse(name)
            raise
        return rt
Example #15
Source File: products.py    From refstack with Apache License 2.0 5 votes vote down vote up
def delete(self, id):
        """Delete product."""
        # Foundation admins or the product's own admins may delete it.
        allowed = (api_utils.check_user_is_foundation_admin()
                   or api_utils.check_user_is_product_admin(id))
        if not allowed:
            pecan.abort(403, 'Forbidden.')
        try:
            db.delete_product(id)
        except DBReferenceError:
            pecan.abort(400, 'Unable to delete. There are still tests '
                             'associated to versions of this product.')
        pecan.response.status = 204
Example #16
Source File: test_purge.py    From karbor with Apache License 2.0 5 votes vote down vote up
def test_purge_deleted_rows_integrity_failure(self):
        """Purging a deleted plan that a resource row still references
        must fail with DBReferenceError rather than orphan the child.
        """
        dialect = self.engine.url.get_dialect()
        if dialect == sqlite.dialect:
            # We're seeing issues with foreign key support in SQLite 3.6.20
            # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
            # It works fine in SQLite 3.7.
            # So return early to skip this test if running SQLite < 3.7
            import sqlite3
            tup = sqlite3.sqlite_version_info
            if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
                self.skipTest(
                    'sqlite version too old for reliable SQLA foreign_keys')
            self.conn.execute("PRAGMA foreign_keys = ON")

        # add new entry in plans and resources for
        # integrity check
        uuid_str = uuidutils.generate_uuid(dashed=False)
        ins_stmt = self.plans.insert().values(id=uuid_str)
        self.conn.execute(ins_stmt)
        ins_stmt = self.resources.insert().values(
            plan_id=uuid_str)
        self.conn.execute(ins_stmt)

        # set plans record to deleted 20 days ago
        old = timeutils.utcnow() - datetime.timedelta(days=20)
        make_old = self.plans.update().where(
            self.plans.c.id.in_([uuid_str])).values(deleted_at=old)
        self.conn.execute(make_old)

        # Verify that purge_deleted_rows fails due to Foreign Key constraint
        self.assertRaises(db_exc.DBReferenceError, db.purge_deleted_rows,
                          self.context, age_in_days=10)
Example #17
Source File: bgp_speaker_router_insertion_db.py    From networking-midonet with Apache License 2.0 5 votes vote down vote up
def set_router_for_bgp_speaker(self, context, bgp_sp_id, r_id):
        """Sets the routers associated with the bgp speaker."""
        try:
            with db_api.CONTEXT_WRITER.using(context):
                association = model.BgpSpeakerRouterAssociation(
                    bgp_speaker_id=bgp_sp_id,
                    router_id=r_id)
                context.session.add(association)
        except db_exc.DBReferenceError:
            # FK violation: the router id does not exist.
            raise l3_exc.RouterNotFound(router_id=r_id)
        except db_exc.DBDuplicateEntry:
            # The association row already exists.
            raise l3_exc.RouterInUse(
                router_id=r_id,
                reason='is already associated with bgp speaker')
Example #18
Source File: test_journal.py    From networking-odl with Apache License 2.0 5 votes vote down vote up
def _raise_DBReferenceError(*args, **kwargs):
    # The caller's arguments are deliberately ignored: DBReferenceError
    # takes exactly four positional arguments, so fabricate four mocks.
    fabricated = [mock.Mock(unsafe=True)] * 4
    raise exception.DBReferenceError(*fabricated)
Example #19
Source File: journal.py    From networking-odl with Apache License 2.0 5 votes vote down vote up
def record(plugin_context, object_type, object_uuid, operation, data,
           ml2_context=None):
    """Create a journal entry for *operation* on *object_uuid*.

    Port create/update payloads are enriched first, and the entry's
    dependencies on earlier journal rows are computed before insertion.

    :raises exception.RetryRequest: when the insert hits a
        DBReferenceError because a calculated dependency vanished.
    """
    if (object_type == odl_const.ODL_PORT and
            operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
        data = _enrich_port(
            plugin_context, ml2_context, object_type, operation, data)

    # Calculate depending_on on other journal entries
    depending_on = dependency_validations.calculate(
        plugin_context, operation, object_type, object_uuid, data)

    # NOTE(mpeterson): Between the moment that a dependency is calculated and
    # the new entry is recorded in the journal, an operation can occur that
    # would make the dependency irrelevant. In that case we request a retry.
    # For more details, read the commit message that introduced this comment.
    try:
        entry = db.create_pending_row(
            plugin_context, object_type, object_uuid, operation, data,
            depending_on=depending_on)
    except exception.DBReferenceError as e:
        raise exception.RetryRequest(e)

    _log_entry(LOG_RECORDED, entry)
    LOG.debug('Entry with ID %(entry_id)s depends on these entries: '
              '%(depending_on)s',
              {'entry_id': entry.seqnum,
               'depending_on': [d.seqnum for d in depending_on]})
Example #20
Source File: flavors.py    From octavia with Apache License 2.0 5 votes vote down vote up
def delete(self, flavor_id):
        """Deletes a Flavor"""
        context = pecan_request.context.get('octavia_context')

        self._auth_validate_action(context, context.project_id,
                                   constants.RBAC_DELETE)
        # The NIL UUID is a reserved placeholder and never deletable.
        if flavor_id == constants.NIL_UUID:
            raise exceptions.NotFound(resource='Flavor', id=constants.NIL_UUID)
        # SERIALIZABLE isolation so no load balancer can be attached to
        # this flavor between the reference check and the delete.
        serial_session = db_api.get_session(autocommit=False)
        serial_session.connection(
            execution_options={'isolation_level': 'SERIALIZABLE'})
        try:
            self.repositories.flavor.delete(serial_session, id=flavor_id)
            serial_session.commit()
        # Handle when load balancers still reference this flavor
        except odb_exceptions.DBReferenceError:
            serial_session.rollback()
            raise exceptions.ObjectInUse(object='Flavor', id=flavor_id)
        except sa_exception.NoResultFound:
            serial_session.rollback()
            raise exceptions.NotFound(resource='Flavor', id=flavor_id)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Unknown flavor delete exception: %s', str(e))
                serial_session.rollback()
        finally:
            serial_session.close()
Example #21
Source File: availability_zones.py    From octavia with Apache License 2.0 5 votes vote down vote up
def delete(self, availability_zone_name):
        """Deletes an Availability Zone"""
        context = pecan_request.context.get('octavia_context')

        self._auth_validate_action(context, context.project_id,
                                   constants.RBAC_DELETE)
        # The NIL UUID is a reserved placeholder and never deletable.
        if availability_zone_name == constants.NIL_UUID:
            raise exceptions.NotFound(resource='Availability Zone',
                                      id=constants.NIL_UUID)
        # SERIALIZABLE isolation so no load balancer can be attached to
        # this availability zone while the delete is in flight.
        serial_session = db_api.get_session(autocommit=False)
        serial_session.connection(
            execution_options={'isolation_level': 'SERIALIZABLE'})
        try:
            self.repositories.availability_zone.delete(
                serial_session, name=availability_zone_name)
            serial_session.commit()
        # Handle when load balancers still reference this availability_zone
        except odb_exceptions.DBReferenceError:
            serial_session.rollback()
            raise exceptions.ObjectInUse(object='Availability Zone',
                                         id=availability_zone_name)
        except sa_exception.NoResultFound:
            serial_session.rollback()
            raise exceptions.NotFound(resource='Availability Zone',
                                      id=availability_zone_name)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    'Unknown availability_zone delete exception: %s', str(e))
                serial_session.rollback()
        finally:
            serial_session.close()
Example #22
Source File: api.py    From zun with Apache License 2.0 5 votes vote down vote up
def destroy_registry(self, context, registry_uuid):
        """Delete the registry row matching *registry_uuid*."""
        session = get_session()
        with session.begin():
            query = add_identity_filter(
                model_query(models.Registry, session=session), registry_uuid)
            try:
                # Exactly one row must match, otherwise the UUID is unknown.
                if query.delete() != 1:
                    raise exception.RegistryNotFound(registry=registry_uuid)
            except db_exc.DBReferenceError:
                raise exception.Conflict('Failed to delete registry '
                                         '%(registry)s since it is in use.',
                                         registry=registry_uuid)
Example #23
Source File: sqlalchemy.py    From gnocchi with Apache License 2.0 5 votes vote down vote up
def create_resource(self, resource_type, id,
                        creator, user_id=None, project_id=None,
                        started_at=None, ended_at=None, metrics=None,
                        original_resource_id=None,
                        **kwargs):
        """Create and return a resource of *resource_type*.

        Raises ValueError when started_at > ended_at,
        indexer.ResourceAlreadyExists on a duplicate id, and
        indexer.ResourceValueError when an FK column value is invalid.
        """
        if (started_at is not None
           and ended_at is not None
           and started_at > ended_at):
            raise ValueError(
                "Start timestamp cannot be after end timestamp")
        if original_resource_id is None:
            original_resource_id = str(id)
        with self.facade.writer() as session:
            resource_cls = self._resource_type_to_mappers(
                session, resource_type)['resource']
            r = resource_cls(
                id=id,
                original_resource_id=original_resource_id,
                type=resource_type,
                creator=creator,
                user_id=user_id,
                project_id=project_id,
                started_at=started_at,
                ended_at=ended_at,
                **kwargs)
            session.add(r)
            # Flush now so constraint violations surface before metrics
            # are attached.
            try:
                session.flush()
            except exception.DBDuplicateEntry:
                raise indexer.ResourceAlreadyExists(id)
            except exception.DBReferenceError as ex:
                raise indexer.ResourceValueError(r.type,
                                                 ex.key,
                                                 getattr(r, ex.key))
            if metrics is not None:
                self._set_metrics_for_resource(session, r, metrics)

            # NOTE(jd) Force load of metrics :)
            r.metrics

            return r
Example #24
Source File: sqlalchemy.py    From gnocchi with Apache License 2.0 5 votes vote down vote up
def delete_archive_policy(self, name):
        """Remove the archive policy *name* from the index.

        Raises indexer.NoSuchArchivePolicy when no such policy exists
        and indexer.ArchivePolicyInUse when metrics or rules still
        reference it.
        """
        in_use_constraints = ("fk_metric_ap_name_ap_name",
                              "fk_apr_ap_name_ap_name")
        with self.facade.writer() as session:
            try:
                deleted = session.query(ArchivePolicy).filter(
                    ArchivePolicy.name == name).delete()
                if deleted == 0:
                    raise indexer.NoSuchArchivePolicy(name)
            except exception.DBReferenceError as e:
                if e.constraint in in_use_constraints:
                    raise indexer.ArchivePolicyInUse(name)
                raise
Example #25
Source File: api.py    From karbor with Apache License 2.0 4 votes vote down vote up
def purge_deleted_rows(context, age_in_days):
    """Purge deleted rows older than age from karbor tables.

    :param context: request context (part of the DB API signature)
    :param age_in_days: rows soft-deleted more than this many days ago
                        are physically removed; must be a positive int
    :raises exception.InvalidParameterValue: if age_in_days is not a
        positive integer
    :raises db_exc.DBReferenceError: if purging a table would violate a
        foreign key constraint
    """
    try:
        age_in_days = int(age_in_days)
    except ValueError:
        msg = _('Invalid value for age, %(age)s')
        LOG.exception(msg, {'age': age_in_days})
        raise exception.InvalidParameterValue(msg % {'age': age_in_days})
    if age_in_days <= 0:
        msg = _('Must supply a positive value for age')
        LOG.exception(msg)
        raise exception.InvalidParameterValue(msg)

    engine = get_engine()
    session = get_session()
    metadata = MetaData()
    metadata.bind = engine
    tables = []

    # Every soft-deletable model (has both __tablename__ and a "deleted"
    # attribute) is a purge candidate.
    for model_class in models.__dict__.values():
        if hasattr(model_class, "__tablename__") and hasattr(
                model_class, "deleted"):
            tables.append(model_class.__tablename__)

    # Reorder the list so the tables are last to avoid ForeignKey constraints
    # get rid of FK constraints
    for tbl in ('plans', 'scheduled_operations'):
        try:
            tables.remove(tbl)
        except ValueError:
            # Pass the mapping as the positional log argument; the previous
            # **locals() spread raised TypeError because Logger.warning()
            # rejects arbitrary keyword arguments.
            LOG.warning('Expected table %(tbl)s was not found in DB.',
                        {'tbl': tbl})
        else:
            tables.append(tbl)

    for table in tables:
        t = Table(table, metadata, autoload=True)
        LOG.info('Purging deleted rows older than age=%(age)d days from '
                 'table=%(table)s', {'age': age_in_days, 'table': table})
        deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
        try:
            with session.begin():
                result = session.execute(
                    t.delete()
                    .where(t.c.deleted_at < deleted_age))
        except db_exc.DBReferenceError:
            LOG.exception('DBError detected when purging from '
                          'table=%(table)s', {'table': table})
            raise

        rows_purged = result.rowcount
        LOG.info("Deleted %(row)d rows from table=%(table)s",
                 {'row': rows_purged, 'table': table})

###################