Python oslo_concurrency.lockutils.lock() Examples

The following are 30 code examples of oslo_concurrency.lockutils.lock(), drawn from several OpenStack-related projects. The source file, project, and license for each example are noted above the code. You may also want to check out the other available functions and classes of the oslo_concurrency.lockutils module.
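Before the project examples, here is a minimal usage sketch (not taken from any of the projects below; the lock names and lock_path are illustrative) showing the two common forms of lock(): an internal lock, which yields a semaphore shared by the threads of the current process, and an external lock, which additionally serializes across processes via a lock file.

from oslo_concurrency import lockutils

# Internal lock: other threads in this process block until it is released.
with lockutils.lock('my-resource'):
    print('holding the internal (semaphore) lock')

# External lock: also serializes across processes using a file under lock_path.
# lock_path is normally read from oslo.config; here it is passed explicitly.
with lockutils.lock('my-resource', lock_file_prefix='demo-',
                    external=True, lock_path='/tmp/locks'):
    print('holding the external (file-based) lock')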
Example #1
Source File: service.py    From zun with Apache License 2.0
def _on_capsule_deleted(self, capsule_uuid):
        try:
            # NOTE(ndesh): We need to lock here to avoid race condition
            #              with the deletion code for CNI DEL so that
            #              we delete the registry entry exactly once
            with lockutils.lock(capsule_uuid, external=True):
                if self.registry[capsule_uuid]['vif_unplugged']:
                    LOG.debug("Remove capsule %(capsule)s from registry",
                              {'capsule': capsule_uuid})
                    del self.registry[capsule_uuid]
                else:
                    LOG.debug("Received delete for capsule %(capsule)s",
                              {'capsule': capsule_uuid})
                    capsule_dict = self.registry[capsule_uuid]
                    capsule_dict['del_received'] = True
                    self.registry[capsule_uuid] = capsule_dict
        except KeyError:
            # This means someone else removed it. It's odd but safe to ignore.
            LOG.debug('Capsule %s entry already removed from registry while '
                      'handling DELETED event. Ignoring.', capsule_uuid)
            pass 
Example #2
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def lock_files(handles_dir, out_queue):
    with lockutils.lock('external', 'test-', external=True):
        # Open some files we can use for locking
        handles = []
        for n in range(50):
            path = os.path.join(handles_dir, ('file-%s' % n))
            handles.append(open(path, 'w'))

        # Loop over all the handles and try locking the file
        # without blocking, keep a count of how many files we
        # were able to lock and then unlock. If the lock fails
        # we get an IOError and bail out with bad exit code
        count = 0
        for handle in handles:
            try:
                lock_file(handle)
                count += 1
                unlock_file(handle)
            except IOError:
                os._exit(2)
            finally:
                handle.close()
        return out_queue.put(count) 
Example #3
Source File: vterm.py    From pypowervm with Apache License 2.0
def _close_vterm_local(adapter, lpar_uuid):
    """Forces the close of the terminal on a local system.

    Will check for a VNC server as well in case it was started via that
    mechanism.

    :param adapter: The adapter to talk over the API.
    :param lpar_uuid: partition uuid
    """
    lpar_id = _get_lpar_id(adapter, lpar_uuid)
    _run_proc(['rmvterm', '--id', lpar_id])

    # Stop the port.
    with lock.lock('powervm_vnc_term'):
        vnc_port = _VNC_UUID_TO_LOCAL_PORT.get(lpar_uuid, 0)
        if vnc_port in _VNC_LOCAL_PORT_TO_REPEATER:
            _VNC_LOCAL_PORT_TO_REPEATER[vnc_port].stop() 
Example #4
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_contextlock_unlocks(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        with lockutils.lock("test") as sem:
            self.assertIsInstance(sem, threading.Semaphore)

            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

            # NOTE(flaper87): Lock should be free
            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

        # NOTE(flaper87): Lock should be free
        # but semaphore should already exist.
        with lockutils.lock("test") as sem2:
            self.assertEqual(sem, sem2) 
Example #5
Source File: vm.py    From nova-powervm with Apache License 2.0
def power_on(adapter, instance, opts=None):
    """Powers on a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to power on.
    :param opts: (Optional) Additional parameters to the pypowervm power_on
                 method.  See that method's docstring for details.
    :return: True if the instance was powered on.  False if it was not in a
             startable state.
    :raises: InstancePowerOnFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        entry = get_instance_wrapper(adapter, instance)

        # Get the current state and see if we can start the VM
        if entry.state in POWERVM_STARTABLE_STATE:
            # Now start the lpar
            power.power_on(entry, None, add_parms=opts)
            return True

    return False 
Example #6
Source File: service.py    From kuryr-kubernetes with Apache License 2.0
def on_deleted(self, pod):
        pod_name = utils.get_pod_unique_name(pod)
        try:
            if pod_name in self.registry:
                # NOTE(ndesh): We need to lock here to avoid race condition
                #              with the deletion code for CNI DEL so that
                #              we delete the registry entry exactly once
                with lockutils.lock(pod_name, external=True):
                    if self.registry[pod_name]['vif_unplugged']:
                        del self.registry[pod_name]
                    else:
                        pod_dict = self.registry[pod_name]
                        pod_dict['del_received'] = True
                        self.registry[pod_name] = pod_dict
        except KeyError:
            # This means someone else removed it. It's odd but safe to ignore.
            LOG.debug('Pod %s entry already removed from registry while '
                      'handling DELETED event. Ignoring.', pod_name)
            pass 
Example #7
Source File: rating.py    From cloudkitty with Apache License 2.0
def expose_modules(self):
        """Load rating modules to expose API controllers."""
        lock = lockutils.lock('rating-modules')
        with lock:
            for ext in self.extensions:
                # FIXME(sheeprine): we should notify two modules with same name
                name = ext.name
                if not ext.obj.config_controller:
                    ext.obj.config_controller = UnconfigurableController
                # Update extension reference
                setattr(self, name, ext.obj.config_controller())
                if name in self._loaded_modules:
                    self._loaded_modules.remove(name)
            # Clear removed modules
            for module in self._loaded_modules:
                delattr(self, module)
            self._loaded_modules = self.extensions.names() 
Example #8
Source File: rating.py    From cloudkitty with Apache License 2.0
def get_one(self, module_id):
        """return a module

        :return: CloudKittyModule
        """
        policy.authorize(pecan.request.context, 'rating:get_module', {})

        try:
            lock = lockutils.lock('rating-modules')
            with lock:
                module = self.extensions[module_id]
        except KeyError:
            pecan.abort(404, 'Module not found.')
        infos = module.obj.module_info.copy()
        infos['module_id'] = infos.pop('name')
        return rating_models.CloudkittyModule(**infos) 
Example #9
Source File: server.py    From coriolis with GNU Affero General Public License v3.0
def cancel_migration(self, ctxt, migration_id, force):
        migration = self._get_migration(ctxt, migration_id)
        if len(migration.executions) != 1:
            raise exception.InvalidMigrationState(
                "Migration '%s' has in improper number of tasks "
                "executions: %d" % (migration_id, len(migration.executions)))
        execution = migration.executions[0]
        if execution.status not in constants.ACTIVE_EXECUTION_STATUSES:
            raise exception.InvalidMigrationState(
                "Migration '%s' is not currently running" % migration_id)
        if execution.status == constants.EXECUTION_STATUS_CANCELLING and (
                not force):
            raise exception.InvalidMigrationState(
                "Migration '%s' is already being cancelled. Please use the "
                "force option if you'd like to force-cancel it.")

        with lockutils.lock(
                constants.EXECUTION_LOCK_NAME_FORMAT % execution.id,
                external=True):
            self._cancel_tasks_execution(ctxt, execution, force=force)
        self._check_delete_reservation_for_transfer(migration) 
Example #10
Source File: rating.py    From cloudkitty with Apache License 2.0
def get_all(self):
        """return the list of loaded modules.

        :return: name of every loaded modules.
        """
        policy.authorize(pecan.request.context, 'rating:list_modules', {})

        modules_list = []
        lock = lockutils.lock('rating-modules')
        with lock:
            for module in self.extensions:
                infos = module.obj.module_info.copy()
                infos['module_id'] = infos.pop('name')
                modules_list.append(rating_models.CloudkittyModule(**infos))

        return rating_models.CloudkittyModuleCollection(
            modules=modules_list) 
Example #11
Source File: vm.py    From nova-powervm with Apache License 2.0
def reboot(adapter, instance, hard):
    """Reboots a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to reboot.
    :param hard: Boolean True if hard reboot, False otherwise.
    :raises: InstanceRebootFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        try:
            entry = get_instance_wrapper(adapter, instance)
            if entry.state != pvm_bp.LPARState.NOT_ACTIVATED:
                if hard:
                    power.PowerOp.stop(
                        entry, opts=popts.PowerOffOpts().vsp_hard().restart())
                else:
                    power.power_off_progressive(entry, restart=True)
            else:
                # pypowervm does NOT throw an exception if "already down".
                # Any other exception from pypowervm is a legitimate failure;
                # let it raise up.
                # If we get here, pypowervm thinks the instance is down.
                power.power_on(entry, None)
        except Exception as e:
            LOG.exception("Failed to reboot instance.", instance=instance)
            raise exception.InstanceRebootFailure(reason=six.text_type(e)) 
Example #12
Source File: event.py    From nova-powervm with Apache License 2.0
def _get_instance(inst, pvm_uuid):
    global _CONTEXT
    if inst is not None:
        return inst
    with lockutils.lock('get_context_once'):
        if _CONTEXT is None:
            _CONTEXT = ctx.get_admin_context()
    LOG.debug('PowerVM Nova Event Handler: Getting inst for id %s', pvm_uuid)
    return vm.get_instance(_CONTEXT, pvm_uuid) 
Example #13
Source File: localdisk.py    From nova-powervm with Apache License 2.0
def _get_or_upload_image(self, context, image_meta):
        """Return a cached image name

        Attempt to find a cached copy of the image. If there is no cached copy
        of the image, create one.

        :param context: nova context used to retrieve image from glance
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :return: The name of the virtual disk containing the image
        """

        # Check for cached image
        with lockutils.lock(image_meta.id):
            vg_wrap = self._get_vg_wrap()
            cache_name = self.get_name_by_uuid(disk_dvr.DiskType.IMAGE,
                                               image_meta.id, short=True)
            image = [disk for disk in vg_wrap.virtual_disks
                     if disk.name == cache_name]
            if len(image) == 1:
                return image[0].udid

            image = tsk_stg.upload_new_vdisk(
                self.adapter, self._vios_uuid, self.vg_uuid,
                disk_dvr.IterableToFileAdapter(
                    IMAGE_API.download(context, image_meta.id)), cache_name,
                image_meta.size, d_size=image_meta.size,
                upload_type=tsk_stg.UploadType.IO_STREAM,
                file_format=image_meta.disk_format)[0]
            return image.udid 
Example #14
Source File: iscsi.py    From nova-powervm with Apache License 2.0
def _connect_volume_to_vio(self, vios_w, slot_mgr):
        """Attempts to connect a volume to a given VIO.

        :param vios_w: The Virtual I/O Server wrapper to connect to.
        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM

        :return: True if the volume was connected.  False if the volume was
                 not (could be the Virtual I/O Server does not have
                 connectivity to the hdisk).
        """
        # check if the vios uuid exist in the expected vios list
        if vios_w.uuid not in self.vios_uuids:
            LOG.debug("Skipping connect volume %(vol)s from "
                      "inactive vios uuid %(uuid)s.",
                      dict(vol=self.volume_id, uuid=vios_w.uuid))
            return False

        device_name, udid = self._discover_volume_on_vios(vios_w)
        if device_name is not None and udid is not None:
            slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, device_name)
            volume_id = self.connection_info["data"]["volume_id"]
            # Found a hdisk on this Virtual I/O Server.  Add the action to
            # map it to the VM when the stg_ftsk is executed.
            with lockutils.lock(hash(self)):
                self._add_append_mapping(
                    vios_w.uuid, device_name, lpar_slot_num=slot, lua=lua,
                    udid=udid, tag=volume_id)

            # Save the udid  for the disk in the connection info.  It is
            # used for the detach.
            self._set_udid(udid)

            LOG.debug('Device attached: %s', device_name,
                      instance=self.instance)

            # Valid attachment
            return True

        return False 
Example #15
Source File: lockutils.py    From oslo.concurrency with Apache License 2.0
def __init__(self, name, lock_file_prefix=None):
        self.mgr = lockutils.lock(name, lock_file_prefix, True) 
Example #16
Source File: server.py    From coriolis with GNU Affero General Public License v3.0
def _update_volumes_info_for_migration_parent_replica(
            self, ctxt, migration_id, instance, updated_task_info):
        migration = db_api.get_migration(ctxt, migration_id)
        replica_id = migration.replica_id

        with lockutils.lock(
                constants.REPLICA_LOCK_NAME_FORMAT % replica_id,
                external=True):
            LOG.debug(
                "Updating volume_info in replica due to snapshot "
                "restore during migration. replica id: %s", replica_id)
            self._update_replica_volumes_info(
                ctxt, replica_id, instance, updated_task_info) 
Example #17
Source File: lockutils.py    From oslo.concurrency with Apache License 2.0
def setUp(self):
        super(LockFixture, self).setUp()
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.lock = self.mgr.__enter__() 
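Examples #15 and #17 are two halves of the same helper: the __init__ and setUp of the LockFixture shipped in oslo.concurrency's fixture module, which holds an external lock for the duration of a test. A rough sketch of how the two excerpts fit together (the fixtures.Fixture base class and the docstring are assumptions, not part of the excerpts):

import fixtures
from oslo_concurrency import lockutils


class LockFixture(fixtures.Fixture):
    """Acquire an external lock on setUp and release it on cleanup (sketch)."""

    def __init__(self, name, lock_file_prefix=None):
        # Example #15: build the lock context manager without entering it.
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        # Example #17: enter the lock now; the cleanup hook releases it.
        super(LockFixture, self).setUp()
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.lock = self.mgr.__enter__()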
Example #18
Source File: rating.py    From cloudkitty with Apache License 2.0
def reload_extensions(self):
        lock = lockutils.lock('rating-modules')
        with lock:
            ck_utils.refresh_stevedore(PROCESSORS_NAMESPACE)
            # FIXME(sheeprine): Implement RPC messages to trigger reload on
            # processors
            self.extensions = extension.ExtensionManager(
                PROCESSORS_NAMESPACE,
                # FIXME(sheeprine): don't want to load it here as we just need
                # the controller
                invoke_on_load=True)
            if not self._first_call:
                self.notify_reload()
            else:
                self._first_call = False 
Example #19
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_lock_internally(self):
        """We can lock across multiple threads."""
        saved_sem_num = len(lockutils._semaphores)
        seen_threads = list()

        def f(_id):
            with lockutils.lock('testlock2', 'test-', external=False):
                for x in range(10):
                    seen_threads.append(_id)

        threads = []
        for i in range(10):
            thread = threading.Thread(target=f, args=(i,))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        self.assertEqual(100, len(seen_threads))
        # Looking at the seen threads, split it into chunks of 10, and verify
        # that the last 9 match the first in each chunk.
        for i in range(10):
            for j in range(9):
                self.assertEqual(seen_threads[i * 10],
                                 seen_threads[i * 10 + 1 + j])

        self.assertEqual(saved_sem_num, len(lockutils._semaphores),
                         "Semaphore leak detected") 
Example #20
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_lock_internal_fair(self):
        """Check that we're actually fair."""

        def f(_id):
            with lockutils.lock('testlock', 'test-',
                                external=False, fair=True):
                lock_holder.append(_id)

        lock_holder = []
        threads = []
        # While holding the fair lock, spawn a bunch of threads that all try
        # to acquire the lock.  They will all block.  Then release the lock
        # and see what happens.
        with lockutils.lock('testlock', 'test-', external=False, fair=True):
            for i in range(10):
                thread = threading.Thread(target=f, args=(i,))
                threads.append(thread)
                thread.start()
                # Allow some time for the new thread to get queued onto the
                # list of pending writers before continuing.  This is gross
                # but there's no way around it without using knowledge of
                # fasteners internals.
                time.sleep(0.5)
        # Wait for all threads.
        for thread in threads:
            thread.join()

        self.assertEqual(10, len(lock_holder))
        # Check that the threads each got the lock in fair order.
        for i in range(10):
            self.assertEqual(i, lock_holder[i]) 
Example #21
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def _do_test_lock_externally(self):
        """We can lock across multiple processes."""
        children = []
        for n in range(50):
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(
                target=lock_files,
                args=(tempfile.mkdtemp(), queue))
            proc.start()
            children.append((proc, queue))
        for child, queue in children:
            child.join()
            count = queue.get(block=False)
            self.assertEqual(50, count) 
Example #22
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_synchronized_without_prefix(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        @lockutils.synchronized('lock', external=True)
        def test_without_prefix():
            # We can't check much
            pass

        test_without_prefix() 
Example #23
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_synchronized_prefix_without_hyphen(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        @lockutils.synchronized('lock', 'hyphen', True)
        def test_without_hyphen():
            # We can't check much
            pass

        test_without_hyphen() 
Example #24
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_contextlock(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        # Note(flaper87): Lock is not external, which means
        # a semaphore will be yielded
        with lockutils.lock("test") as sem:
            self.assertIsInstance(sem, threading.Semaphore)

            # NOTE(flaper87): Lock is external so an InterProcessLock
            # will be yielded.
            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

            with lockutils.lock("test1", external=True) as lock1:
                self.assertIsInstance(lock1, lockutils.InterProcessLock) 
Example #25
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_lock_file_exists(self):
        lock_file = os.path.join(self.lock_dir, 'lock-file')

        @lockutils.synchronized('lock-file', external=True,
                                lock_path=self.lock_dir)
        def foo():
            self.assertTrue(os.path.exists(lock_file))

        foo() 
Example #26
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_interthread_external_lock(self):
        call_list = []

        @lockutils.synchronized('foo', external=True, lock_path=self.lock_dir)
        def foo(param):
            """Simulate a long-running threaded operation."""
            call_list.append(param)
            # NOTE(bnemec): This is racy, but I don't want to add any
            # synchronization primitives that might mask a problem
            # with the one we're trying to test here.
            time.sleep(.5)
            call_list.append(param)

        def other(param):
            foo(param)

        thread = threading.Thread(target=other, args=('other',))
        thread.start()
        # Make sure the other thread grabs the lock
        # NOTE(bnemec): File locks do not actually work between threads, so
        # this test is verifying that the local semaphore is still enforcing
        # external locks in that case.  This means this test does not have
        # the same race problem as the process test above because when the
        # file is created the semaphore has already been grabbed.
        start = time.time()
        while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
            if time.time() - start > 5:
                self.fail('Timed out waiting for thread to grab lock')
            time.sleep(0)
        thread1 = threading.Thread(target=other, args=('main',))
        thread1.start()
        thread1.join()
        thread.join()
        self.assertEqual(['other', 'other', 'main', 'main'], call_list) 
Example #27
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def test_non_destructive(self):
        lock_file = os.path.join(self.lock_dir, 'not-destroyed')
        with open(lock_file, 'w') as f:
            f.write('test')
        with lockutils.lock('not-destroyed', external=True,
                            lock_path=self.lock_dir):
            with open(lock_file) as f:
                self.assertEqual('test', f.read()) 
Example #28
Source File: test_lockutils.py    From oslo.concurrency with Apache License 2.0
def _check_in_lock(self):
        self.assertTrue(self.lock.exists()) 
Example #29
Source File: server.py    From coriolis with GNU Affero General Public License v3.0
def _update_replica_volumes_info(self, ctxt, replica_id, instance,
                                     updated_task_info):
        """ WARN: the lock for the Replica must be pre-acquired. """
        db_api.update_transfer_action_info_for_instance(
            ctxt, replica_id, instance,
            updated_task_info) 
Example #30
Source File: singleton.py    From zun with Apache License 2.0
def __call__(cls, *args, **kwargs):
        with lockutils.lock('singleton_lock', semaphores=cls._semaphores):
            if cls not in cls._instances:
                cls._instances[cls] = super(
                    Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
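Example #30 is only the __call__ method of a Singleton metaclass; the class attributes it relies on are outside the excerpt. A plausible surrounding scaffold (assumed, not quoted from zun's source) would look roughly like this, with the semaphores= argument pointing lock() at a registry owned by the metaclass rather than lockutils' module-level one:

from oslo_concurrency import lockutils


class Singleton(type):
    _instances = {}                        # one instance per decorated class
    _semaphores = lockutils.Semaphores()   # private named-semaphore registry

    def __call__(cls, *args, **kwargs):
        with lockutils.lock('singleton_lock', semaphores=cls._semaphores):
            if cls not in cls._instances:
                cls._instances[cls] = super(
                    Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]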