Python oslo_db.exception.DBDuplicateEntry() Examples
The following are 30 code examples of oslo_db.exception.DBDuplicateEntry().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
oslo_db.exception, or try the search function.
Example #1
Source File: l7policy.py From octavia with Apache License 2.0 | 6 votes |
def _validate_create_l7policy(self, lock_session, l7policy_dict):
    """Create an L7 policy row, translating DB errors into API exceptions.

    :param lock_session: locked DB session used for the create
    :param l7policy_dict: keyword values for the new L7 policy
    :returns: the created L7 policy from the repository layer
    :raises exceptions.IDAlreadyExists: on a duplicate-entry DB error
    :raises exceptions.InvalidOption: on any other DBError
    """
    try:
        # Set the default HTTP redirect code here so it's explicit
        if ((l7policy_dict.get('redirect_url') or
                l7policy_dict.get('redirect_prefix')) and
                not l7policy_dict.get('redirect_http_code')):
            l7policy_dict['redirect_http_code'] = 302
        return self.repositories.l7policy.create(lock_session,
                                                 **l7policy_dict)
    except odb_exceptions.DBDuplicateEntry:
        raise exceptions.IDAlreadyExists()
    except odb_exceptions.DBError:
        # TODO(blogan): will have to do separate validation protocol
        # before creation or update since the exception messages
        # do not give any information as to what constraint failed
        raise exceptions.InvalidOption(value='', option='')
Example #2
Source File: gateway_device.py From networking-midonet with Apache License 2.0 | 6 votes |
def create_gateway_device_remote_mac_entry(self, context, gateway_device_id,
                                           remote_mac_entry):
    """Add a remote-MAC table entry for a gateway device.

    :param remote_mac_entry: request body wrapping a 'remote_mac_entry' dict
        with 'mac_address', 'segmentation_id' and 'vtep_address' keys
    :returns: the new entry rendered as a dict
    :raises gw_device_ext.DuplicateRemoteMacEntry: when the DB reports a
        duplicate entry for this MAC
    """
    rme = remote_mac_entry['remote_mac_entry']
    try:
        with context.session.begin(subtransactions=True):
            gw_rmt_db = GatewayRemoteMacTable(
                id=uuidutils.generate_uuid(),
                device_id=gateway_device_id,
                mac_address=rme['mac_address'],
                segmentation_id=rme['segmentation_id'],
                vtep_address=rme['vtep_address'])
            context.session.add(gw_rmt_db)
    except db_exc.DBDuplicateEntry:
        raise gw_device_ext.DuplicateRemoteMacEntry(
            mac_address=rme['mac_address'])
    return self._make_remote_mac_dict(gw_rmt_db)
Example #3
Source File: vnf_package.py From tacker with Apache License 2.0 | 6 votes |
def _add_user_defined_data(context, package_uuid, user_data, max_retries=10):
    """Insert user-defined key/value metadata rows for a VNF package.

    Retries up to ``max_retries`` times when a concurrent transaction
    causes a duplicate-entry error.

    :param context: request context providing the DB session
    :param package_uuid: UUID of the owning VNF package
    :param user_data: mapping of user-defined keys to values
    :param max_retries: attempts before giving up on duplicates
    :returns: ``user_data`` unchanged, on success
    :raises exceptions.UserDataUpdateCreateFailed: after exhausting retries
    """
    for attempt in range(max_retries):
        try:
            with db_api.context_manager.writer.using(context):
                new_entries = []
                for key, value in user_data.items():
                    new_entries.append({"key": key,
                                        "value": value,
                                        "package_uuid": package_uuid})
                if new_entries:
                    # Bulk insert in one statement instead of per-row adds.
                    context.session.execute(
                        models.VnfPackageUserData.__table__.insert(None),
                        new_entries)
                return user_data
        except db_exc.DBDuplicateEntry:
            # A concurrent transaction has been committed; try again
            # unless this was the last attempt.
            # BUG FIX: the original bound save_and_reraise_exception() to
            # the name ``context``, shadowing the request-context parameter
            # and breaking every retry iteration after the first.
            with excutils.save_and_reraise_exception() as ctxt:
                if attempt < max_retries - 1:
                    ctxt.reraise = False
                else:
                    raise exceptions.UserDataUpdateCreateFailed(
                        id=package_uuid, retries=max_retries)
Example #4
Source File: api.py From zun with Apache License 2.0 | 6 votes |
def create_container(self, context, values):
    """Persist a new container row built from *values*.

    A UUID is generated when the caller does not supply one, and the
    container name (if present) is checked for uniqueness first.

    :raises exception.ContainerAlreadyExists: on a duplicate UUID.
    """
    # Fill in a generated UUID for brand-new containers.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    name = values.get('name')
    if name:
        self._validate_unique_container_name(context, name)
    container_ref = models.Container()
    container_ref.update(values)
    try:
        container_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ContainerAlreadyExists(field='UUID',
                                               value=values['uuid'])
    return container_ref
Example #5
Source File: api.py From cloudkitty with Apache License 2.0 | 6 votes |
def create_field(self, service_uuid, name):
    """Create a HashMap field under the given service.

    :returns: the freshly created (and re-queried) field
    :raises api.FieldAlreadyExists: when a field with this name already
        exists for the service
    """
    service_db = self.get_service(uuid=service_uuid)
    session = db.get_session()
    try:
        with session.begin():
            field_db = models.HashMapField(
                service_id=service_db.id,
                name=name,
                field_id=uuidutils.generate_uuid())
            session.add(field_db)
        # FIXME(sheeprine): backref are not populated as they used to be.
        # Querying the item again to get backref.
        field_db = self.get_field(service_uuid=service_uuid, name=name)
    except exception.DBDuplicateEntry:
        # Re-fetch the existing row so the error can report its ids.
        field_db = self.get_field(service_uuid=service_uuid, name=name)
        raise api.FieldAlreadyExists(field_db.name, field_db.field_id)
    else:
        return field_db
Example #6
Source File: sqlalchemy.py From gnocchi with Apache License 2.0 | 6 votes |
def create_archive_policy_rule(self, name, metric_pattern,
                               archive_policy_name):
    """Create a rule linking a metric name pattern to an archive policy.

    :raises indexer.NoSuchArchivePolicy: if the referenced archive policy
        does not exist (detected via the FK constraint name)
    :raises indexer.ArchivePolicyRuleAlreadyExists: on a duplicate rule name
    """
    apr = ArchivePolicyRule(
        name=name,
        archive_policy_name=archive_policy_name,
        metric_pattern=metric_pattern
    )
    try:
        with self.facade.writer() as session:
            session.add(apr)
    except exception.DBReferenceError as e:
        # Only the archive-policy FK maps to "no such policy"; any other
        # reference error is unexpected and re-raised as-is.
        if e.constraint == 'fk_apr_ap_name_ap_name':
            raise indexer.NoSuchArchivePolicy(archive_policy_name)
        raise
    except exception.DBDuplicateEntry:
        raise indexer.ArchivePolicyRuleAlreadyExists(name)
    return apr
Example #7
Source File: lockedobjects_db.py From dragonflow with Apache License 2.0 | 6 votes |
def _test_and_create_object(uuid):
    """Ensure a distributed-lock row exists for *uuid*.

    If the row exists and its lock has outlived the configured TTL, the
    lock is forcibly reset; if the row is missing it is created, ignoring
    a concurrent creation by another worker.
    """
    try:
        session = db_api.get_writer_session()
        with session.begin():
            row = session.query(models.DFLockedObjects).filter_by(
                object_uuid=uuid).one()
            # test ttl
            if row.lock and timeutils.is_older_than(
                    row.created_at, cfg.CONF.df.distributed_lock_ttl):
                # reset the lock if it is timeout
                LOG.warning('The lock for object %(id)s is reset '
                            'due to timeout.', {'id': uuid})
                _lock_free_update(session, uuid, lock_state=True,
                                  session_id=row.session_id)
    except orm_exc.NoResultFound:
        # No lock row yet -- create one in a fresh transaction.
        try:
            session = db_api.get_writer_session()
            with session.begin():
                _create_db_row(session, oid=uuid)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
Example #8
Source File: bgpvpn_db.py From networking-bgpvpn with Apache License 2.0 | 6 votes |
def create_port_assoc(self, context, bgpvpn_id, port_association):
    """Associate a port with a BGPVPN and record its routes.

    The association row and its routes are written in two separate
    writer transactions; the duplicate check only covers the first.

    :raises bgpvpn_rc_ext.BGPVPNPortAssocAlreadyExists: if the port is
        already associated with this BGPVPN
    """
    port_id = port_association['port_id']
    advertise_fixed_ips = port_association['advertise_fixed_ips']
    try:
        with db_api.CONTEXT_WRITER.using(context):
            port_assoc_db = BGPVPNPortAssociation(
                tenant_id=port_association['tenant_id'],
                bgpvpn_id=bgpvpn_id,
                port_id=port_id,
                advertise_fixed_ips=advertise_fixed_ips)
            context.session.add(port_assoc_db)
    except db_exc.DBDuplicateEntry:
        LOG.warning(("port %(port_id)s is already associated to "
                     "BGPVPN %(bgpvpn_id)s"),
                    {'port_id': port_id,
                     'bgpvpn_id': bgpvpn_id})
        raise bgpvpn_rc_ext.BGPVPNPortAssocAlreadyExists(
            bgpvpn_id=bgpvpn_id, port_id=port_association['port_id'])
    # Routes are persisted after the association row has been committed.
    with db_api.CONTEXT_WRITER.using(context):
        for route in port_association['routes']:
            _add_port_assoc_route_db_from_dict(context, route,
                                               port_assoc_db.id)
    return self._make_port_assoc_dict(port_assoc_db)
Example #9
Source File: test_database_tasks.py From octavia with Apache License 2.0 | 6 votes |
def test_create_vrrp_group_for_lb(self, mock_vrrp_group_create,
                                  mock_generate_uuid, mock_LOG,
                                  mock_get_session,
                                  mock_loadbalancer_repo_update,
                                  mock_listener_repo_update,
                                  mock_amphora_repo_update,
                                  mock_amphora_repo_delete):
    """Exercise CreateVRRPGroupForLB on success and on duplicate entry.

    The first execute() gets a usable session ('TEST'); the second call
    makes get_session raise DBDuplicateEntry, which the task is expected
    to swallow.
    """
    mock_get_session.side_effect = ['TEST',
                                    odb_exceptions.DBDuplicateEntry]
    create_vrrp_group = database_tasks.CreateVRRPGroupForLB()
    create_vrrp_group.execute(self.loadbalancer_mock)
    mock_vrrp_group_create.assert_called_once_with(
        'TEST', load_balancer_id=LB_ID,
        vrrp_group_name=LB_ID.replace('-', ''),
        vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
        vrrp_auth_pass=mock_generate_uuid.return_value.replace('-',
                                                               '')[0:7],
        advert_int=1)
    # Second call hits the DBDuplicateEntry path; no assertion needed --
    # the test passes as long as the exception does not propagate.
    create_vrrp_group.execute(self.loadbalancer_mock)
Example #10
Source File: bgpvpn_db.py From networking-bgpvpn with Apache License 2.0 | 6 votes |
def create_router_assoc(self, context, bgpvpn_id, router_association):
    """Associate a router with a BGPVPN, rejecting duplicates.

    :returns: the association rendered as a dict
    :raises bgpvpn_ext.BGPVPNRouterAssocAlreadyExists: if the router is
        already associated with this BGPVPN
    """
    router_id = router_association['router_id']
    try:
        with db_api.CONTEXT_WRITER.using(context):
            router_assoc_db = BGPVPNRouterAssociation(
                tenant_id=router_association['tenant_id'],
                bgpvpn_id=bgpvpn_id,
                router_id=router_id)
            context.session.add(router_assoc_db)
            return self._make_router_assoc_dict(router_assoc_db)
    except db_exc.DBDuplicateEntry:
        LOG.warning("router %(router_id)s is already associated to "
                    "BGPVPN %(bgpvpn_id)s",
                    {'router_id': router_id,
                     'bgpvpn_id': bgpvpn_id})
        raise bgpvpn_ext.BGPVPNRouterAssocAlreadyExists(
            bgpvpn_id=bgpvpn_id,
            router_id=router_association['router_id'])
Example #11
Source File: bgpvpn_db.py From networking-bgpvpn with Apache License 2.0 | 6 votes |
def create_net_assoc(self, context, bgpvpn_id, net_assoc):
    """Associate a network with a BGPVPN, rejecting duplicates.

    :returns: the association rendered as a dict
    :raises bgpvpn_ext.BGPVPNNetAssocAlreadyExists: if the network is
        already associated with this BGPVPN
    """
    try:
        with db_api.CONTEXT_WRITER.using(context):
            net_assoc_db = BGPVPNNetAssociation(
                tenant_id=net_assoc['tenant_id'],
                bgpvpn_id=bgpvpn_id,
                network_id=net_assoc['network_id'])
            context.session.add(net_assoc_db)
            return self._make_net_assoc_dict(net_assoc_db)
    except db_exc.DBDuplicateEntry:
        LOG.warning("network %(net_id)s is already associated to "
                    "BGPVPN %(bgpvpn_id)s",
                    {'net_id': net_assoc['network_id'],
                     'bgpvpn_id': bgpvpn_id})
        raise bgpvpn_ext.BGPVPNNetAssocAlreadyExists(
            bgpvpn_id=bgpvpn_id, net_id=net_assoc['network_id'])
Example #12
Source File: api.py From zun with Apache License 2.0 | 6 votes |
def create_registry(self, context, values):
    """Persist a new registry row, encrypting its password at rest.

    The caller receives the original plaintext password back on the
    returned object, while only the encrypted form is stored.

    :raises exception.RegistryAlreadyExists: on a duplicate UUID.
    """
    # Fill in a generated UUID for brand-new registries.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    plaintext_password = values.get('password')
    if plaintext_password:
        # Only the encrypted form ever reaches the database.
        values['password'] = crypt.encrypt(values.get('password'))
    registry_ref = models.Registry()
    registry_ref.update(values)
    try:
        registry_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.RegistryAlreadyExists(
            field='UUID', value=values['uuid'])
    if plaintext_password:
        # Hand the caller back the original secret, not the ciphertext.
        registry_ref['password'] = plaintext_password
    return registry_ref
Example #13
Source File: api.py From ec2-api with Apache License 2.0 | 6 votes |
def add_item(context, kind, data):
    """Insert an item row, merging into an existing row on os_id clash.

    If saving fails with a duplicate on the os_id index, the existing
    row (owned by this project or unowned) is located, its packed data
    merged with *data*, and re-saved under this project.

    :returns: the unpacked item data
    """
    item_ref = models.Item()
    item_ref.update({
        "project_id": context.project_id,
        "id": _new_id(kind),
    })
    item_ref.update(_pack_item_data(data))
    try:
        item_ref.save()
    except db_exception.DBDuplicateEntry as ex:
        # Only an os_id collision is recoverable; re-raise anything else.
        if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and
                'os_id' not in ex.columns):
            raise
        item_ref = (model_query(context, models.Item).
                    filter_by(os_id=data["os_id"]).
                    filter(or_(models.Item.project_id == context.project_id,
                               models.Item.project_id.is_(None))).
                    filter(models.Item.id.like('%s-%%' % kind)).
                    one())
        item_data = _unpack_item_data(item_ref)
        item_data.update(data)
        item_ref.update(_pack_item_data(item_data))
        item_ref.project_id = context.project_id
        item_ref.save()
    return _unpack_item_data(item_ref)
Example #14
Source File: api.py From ec2-api with Apache License 2.0 | 6 votes |
def add_item_id(context, kind, os_id, project_id=None):
    """Insert a bare item-id mapping, reusing an existing one on clash.

    On an os_id duplicate the already-stored row's id is returned
    instead of creating a new mapping.

    :returns: the item's id
    """
    item_ref = models.Item()
    item_ref.update({
        "id": _new_id(kind),
        "os_id": os_id,
    })
    if project_id:
        item_ref.project_id = project_id
    try:
        item_ref.save()
    except db_exception.DBDuplicateEntry as ex:
        # Only an os_id collision is recoverable; re-raise anything else.
        if (models.ITEMS_OS_ID_INDEX_NAME not in ex.columns and
                ex.columns != ['os_id']):
            raise
        item_ref = (model_query(context, models.Item).
                    filter_by(os_id=os_id).
                    one())
    return item_ref.id
Example #15
Source File: test_database_tasks.py From octavia with Apache License 2.0 | 6 votes |
def test_create_vrrp_group_for_lb(self, mock_vrrp_group_create,
                                  mock_generate_uuid, mock_LOG,
                                  mock_get_session,
                                  mock_loadbalancer_repo_update,
                                  mock_listener_repo_update,
                                  mock_amphora_repo_update,
                                  mock_amphora_repo_delete):
    """Exercise CreateVRRPGroupForLB on success and on duplicate entry.

    NOTE(review): the first execute() passes _loadbalancer_mock.id while
    the second passes _loadbalancer_mock itself -- verify against the
    task's expected argument type; this looks inconsistent.
    """
    mock_get_session.side_effect = ['TEST',
                                    odb_exceptions.DBDuplicateEntry]
    create_vrrp_group = database_tasks.CreateVRRPGroupForLB()
    create_vrrp_group.execute(_loadbalancer_mock.id)
    mock_vrrp_group_create.assert_called_once_with(
        'TEST', load_balancer_id=LB_ID,
        vrrp_group_name=LB_ID.replace('-', ''),
        vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
        vrrp_auth_pass=mock_generate_uuid.return_value.replace('-',
                                                               '')[0:7],
        advert_int=1)
    # Second call hits the DBDuplicateEntry path; passing is defined as
    # the exception not propagating.
    create_vrrp_group.execute(_loadbalancer_mock)
Example #16
Source File: api.py From ec2-api with Apache License 2.0 | 6 votes |
def add_tags(context, tags):
    """Insert or update a batch of tags for the current project.

    Each tag is saved in a nested transaction; on a duplicate of the
    (project_id, item_id, key) primary key the existing row's value is
    updated in place instead.
    """
    session = get_session()
    get_query = (model_query(context, models.Tag,
                             session=session).
                 filter_by(project_id=context.project_id,
                           # NOTE(ft): item_id param name is reserved for
                           # sqlalchemy internal use
                           item_id=bindparam('tag_item_id'),
                           key=bindparam('tag_key')))
    with session.begin():
        for tag in tags:
            tag_ref = models.Tag(project_id=context.project_id,
                                 item_id=tag['item_id'],
                                 key=tag['key'],
                                 value=tag['value'])
            try:
                # Nested transaction so one duplicate does not roll back
                # the whole batch.
                with session.begin(nested=True):
                    tag_ref.save(session)
            except db_exception.DBDuplicateEntry as ex:
                # Only a primary-key collision is recoverable.
                if ('PRIMARY' not in ex.columns and
                        ex.columns != ['project_id', 'item_id', 'key']):
                    raise
                (get_query.params(tag_item_id=tag['item_id'],
                                  tag_key=tag['key']).
                 update({'value': tag['value']}))
Example #17
Source File: database_tasks.py From octavia with Apache License 2.0 | 6 votes |
def execute(self, loadbalancer):
    """Create a VRRP group for a load balancer.

    :param loadbalancer: Load balancer for which a VRRP group
           should be created
    :returns: Updated load balancer
    """
    try:
        self.repos.vrrpgroup.create(
            db_apis.get_session(),
            load_balancer_id=loadbalancer[constants.LOADBALANCER_ID],
            # Group name is the LB id with dashes stripped.
            vrrp_group_name=str(
                loadbalancer[constants.LOADBALANCER_ID]).replace('-', ''),
            vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
            # Random 7-character auth pass derived from a fresh UUID.
            vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7],
            advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
    except odb_exceptions.DBDuplicateEntry:
        # Idempotent: an existing group for this LB is not an error.
        LOG.debug('VRRP_GROUP entry already exists for load balancer, '
                  'skipping create.')
    return loadbalancer
Example #18
Source File: api.py From manila with Apache License 2.0 | 6 votes |
def share_type_access_add(context, type_id, project_id):
    """Add given tenant to the share type access list."""
    share_type_id = _share_type_get_id_from_share_type(context, type_id)
    access_ref = models.ShareTypeProjects()
    access_ref.update({"share_type_id": share_type_id,
                       "project_id": project_id})
    session = get_session()
    with session.begin():
        try:
            access_ref.save(session=session)
        except db_exception.DBDuplicateEntry:
            # The (share_type, project) pair already exists.
            raise exception.ShareTypeAccessExists(share_type_id=type_id,
                                                  project_id=project_id)
        return access_ref
Example #19
Source File: database_tasks.py From octavia with Apache License 2.0 | 6 votes |
def execute(self, loadbalancer_id):
    """Create a VRRP group for a load balancer.

    :param loadbalancer_id: Load balancer ID for which a VRRP group
           should be created
    """
    # Group name is the LB id with dashes stripped; the auth pass is a
    # random 7-character token derived from a fresh UUID.
    group_name = str(loadbalancer_id).replace('-', '')
    auth_pass = uuidutils.generate_uuid().replace('-', '')[0:7]
    try:
        self.repos.vrrpgroup.create(
            db_apis.get_session(),
            load_balancer_id=loadbalancer_id,
            vrrp_group_name=group_name,
            vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
            vrrp_auth_pass=auth_pass,
            advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
    except odb_exceptions.DBDuplicateEntry:
        # Idempotent: an existing group for this LB is not an error.
        LOG.debug('VRRP_GROUP entry already exists for load balancer, '
                  'skipping create.')
Example #20
Source File: member.py From octavia with Apache License 2.0 | 6 votes |
def _validate_create_member(self, lock_session, member_dict):
    """Validate creating member on pool."""
    try:
        return self.repositories.member.create(lock_session, **member_dict)
    except odb_exceptions.DBDuplicateEntry as de:
        column_list = ['pool_id', 'ip_address', 'protocol_port']
        constraint_list = ['uq_member_pool_id_address_protocol_port']
        if ['id'] == de.columns:
            raise exceptions.IDAlreadyExists()
        # NOTE(review): if the duplicate matches neither the column set
        # nor the constraint set, this falls through and implicitly
        # returns None -- confirm callers tolerate that.
        if (set(column_list) == set(de.columns) or
                set(constraint_list) == set(de.columns)):
            raise exceptions.DuplicateMemberEntry(
                ip_address=member_dict.get('ip_address'),
                port=member_dict.get('protocol_port'))
    except odb_exceptions.DBError:
        # TODO(blogan): will have to do separate validation protocol
        # before creation or update since the exception messages
        # do not give any information as to what constraint failed
        raise exceptions.InvalidOption(value='', option='')
Example #21
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_cluster(self, values):
    """Persist a new cluster row built from *values*.

    A UUID is generated when the caller does not supply one.

    :raises exception.ClusterAlreadyExists: on a duplicate UUID.
    """
    # Fill in a generated UUID for brand-new clusters.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    cluster_ref = models.Cluster()
    cluster_ref.update(values)
    try:
        cluster_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ClusterAlreadyExists(uuid=values['uuid'])
    return cluster_ref
Example #22
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_quota(self, values):
    """Persist a new quota row built from *values*.

    :raises exception.QuotaAlreadyExists: when a quota already exists
        for this project/resource pair.
    """
    quota_ref = models.Quota()
    quota_ref.update(values)
    try:
        quota_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.QuotaAlreadyExists(project_id=values['project_id'],
                                           resource=values['resource'])
    return quota_ref
Example #23
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_x509keypair(self, values):
    """Persist a new X509 key pair row built from *values*.

    A UUID is generated when the caller does not supply one.

    :raises exception.X509KeyPairAlreadyExists: on a duplicate UUID.
    """
    # Fill in a generated UUID for brand-new key pairs.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    keypair_ref = models.X509KeyPair()
    keypair_ref.update(values)
    try:
        keypair_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
    return keypair_ref
Example #24
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_magnum_service(self, values):
    """Register a magnum service, returning the existing row on duplicate.

    A duplicate (host, binary) pair is logged and the already-stored
    row is returned instead of raising.
    """
    magnum_service = models.MagnumService()
    magnum_service.update(values)
    try:
        magnum_service.save()
    except db_exc.DBDuplicateEntry:
        host = values["host"]
        binary = values["binary"]
        LOG.warning("Magnum service with same host:%(host)s and"
                    " binary:%(binary)s had been saved into DB",
                    {'host': host,
                     'binary': binary})
        query = model_query(models.MagnumService)
        query = query.filter_by(host=host, binary=binary)
        return query.one()
    return magnum_service
Example #25
Source File: test_lib.py From networking-l2gw with Apache License 2.0 | 5 votes |
def test_add_ucast_mac_remote_raise_on_duplicate_constraint(self):
    """A second ucast-MAC-remote insert must raise DBDuplicateEntry.

    NOTE(review): the second call supplies '11:22:33:44:55:66:77' (seven
    octets) and a fresh logical-switch uuid yet still expects a duplicate
    -- confirm which columns the unique constraint actually covers.
    """
    record_dict = self._get_ucast_mac_remote_dict()
    self._create_ucast_mac_remote(record_dict)
    # Call the method twice to trigger a db duplicate constraint error,
    # this time with a different mac and logical switch id!
    self.assertRaises(d_exc.DBDuplicateEntry,
                      self._create_ucast_mac_remote, record_dict,
                      '11:22:33:44:55:66:77', _uuid())
Example #26
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_cluster_template(self, values):
    """Persist a new cluster template row built from *values*.

    A UUID is generated when the caller does not supply one.

    :raises exception.ClusterTemplateAlreadyExists: on a duplicate UUID.
    """
    # Fill in a generated UUID for brand-new templates.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    template_ref = models.ClusterTemplate()
    template_ref.update(values)
    try:
        template_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ClusterTemplateAlreadyExists(uuid=values['uuid'])
    return template_ref
Example #27
Source File: api.py From magnum with Apache License 2.0 | 5 votes |
def create_nodegroup(self, values):
    """Persist a new node group row built from *values*.

    A UUID is generated when the caller does not supply one.

    :raises exception.NodeGroupAlreadyExists: when a node group with
        this name already exists in the cluster.
    """
    # Fill in a generated UUID for brand-new node groups.
    if not values.get('uuid'):
        values['uuid'] = uuidutils.generate_uuid()
    nodegroup_ref = models.NodeGroup()
    nodegroup_ref.update(values)
    try:
        nodegroup_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.NodeGroupAlreadyExists(
            cluster_id=values['cluster_id'], name=values['name'])
    return nodegroup_ref
Example #28
Source File: test_lib.py From networking-l2gw with Apache License 2.0 | 5 votes |
def test_add_vlan_binding_raise_on_duplicate_constraint(self):
    """A second identical VLAN-binding insert must raise DBDuplicateEntry."""
    record_dict = self._get_vlan_binding_dict()
    self._create_vlan_binding(record_dict)
    # Call the method twice to trigger a db duplicate constraint error,
    # this time with a same entries
    self.assertRaises(d_exc.DBDuplicateEntry,
                      self._create_vlan_binding, record_dict)
Example #29
Source File: bgp_dragentscheduler_db.py From neutron-dynamic-routing with Apache License 2.0 | 5 votes |
def add_bgp_speaker_to_dragent(self, context, agent_id, speaker_id):
    """Associate a BgpDrAgent with a BgpSpeaker."""
    try:
        self._save_bgp_speaker_dragent_binding(context,
                                               agent_id,
                                               speaker_id)
    except db_exc.DBDuplicateEntry:
        # The binding already exists.
        raise bgp_dras_ext.DrAgentAssociationError(
            agent_id=agent_id)
    LOG.debug('BgpSpeaker %(bgp_speaker_id)s added to '
              'BgpDrAgent %(agent_id)s',
              {'bgp_speaker_id': speaker_id, 'agent_id': agent_id})
Example #30
Source File: bgp_speaker_router_insertion_db.py From networking-midonet with Apache License 2.0 | 5 votes |
def set_router_for_bgp_speaker(self, context, bgp_sp_id, r_id):
    """Sets the routers associated with the bgp speaker."""
    try:
        with db_api.CONTEXT_WRITER.using(context):
            bgp_router_db = model.BgpSpeakerRouterAssociation(
                bgp_speaker_id=bgp_sp_id,
                router_id=r_id)
            context.session.add(bgp_router_db)
    except db_exc.DBDuplicateEntry:
        # Router is already bound to a speaker.
        raise l3_exc.RouterInUse(
            router_id=r_id,
            reason='is already associated with bgp speaker')
    except db_exc.DBReferenceError:
        # FK failure means the router row does not exist.
        raise l3_exc.RouterNotFound(router_id=r_id)