Python filelock.Timeout() Examples
The following are 21 code examples of filelock.Timeout().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module filelock, or try the search function.
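
All of the examples below share the same basic pattern: acquire() raises filelock.Timeout when the lock cannot be obtained within the given timeout. As a minimal sketch of that pattern (the lock path here is hypothetical, not taken from any of the examples):

import filelock

lock = filelock.FileLock("/tmp/example.lock")  # hypothetical lock path
try:
    # acquire() raises filelock.Timeout if another process still holds
    # the lock after `timeout` seconds
    with lock.acquire(timeout=1):
        pass  # do work while the lock is held
except filelock.Timeout:
    print("could not acquire /tmp/example.lock within 1 second")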
Example #1
Source File: local.py From cloudstorage with MIT License
@contextmanager  # restored: the docstring and the yield below indicate this is a context manager
def lock_local_file(path: str) -> filelock.FileLock:
    """Platform dependent file lock.

    :param path: File or directory path to lock.
    :type path: str

    :yield: File lock context manager.
    :yield type: :class:`filelock.FileLock`

    :raise CloudStorageError: If lock could not be acquired.
    """
    lock = filelock.FileLock(path + ".lock")

    try:
        lock.acquire(timeout=0.1)
    except filelock.Timeout:
        raise CloudStorageError("Lock timeout")

    yield lock

    if lock.is_locked:
        lock.release()

    if os.path.exists(lock.lock_file):
        os.remove(lock.lock_file)
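
Since lock_local_file yields the lock, the project presumably drives it as a context manager; a usage sketch under that assumption (the path is hypothetical):

with lock_local_file("/tmp/data.bin") as lock:  # hypothetical path
    ...  # "/tmp/data.bin.lock" is held here; it is released and removed on exit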
Example #2
Source File: test.py From py-filelock with The Unlicense
def test_del(self):
    """
    Tests if the lock is released when the object is deleted.
    """
    lock1 = self.LOCK_TYPE(self.LOCK_PATH)
    lock2 = self.LOCK_TYPE(self.LOCK_PATH)

    # Acquire lock 1.
    lock1.acquire()
    self.assertTrue(lock1.is_locked)
    self.assertFalse(lock2.is_locked)

    # Try to acquire lock 2.
    self.assertRaises(filelock.Timeout, lock2.acquire, timeout=1)  # FIXME (SoftFileLock)

    # Delete lock 1 and try to acquire lock 2 again.
    del lock1
    lock2.acquire()
    self.assertTrue(lock2.is_locked)

    lock2.release()
    return None
Example #3
Source File: summary.py From chainerui with MIT License
def save(self, out, timeout):
    filepath = os.path.join(out, self.filename)
    lockpath = filepath + '.lock'

    try:
        with filelock.FileLock(lockpath, timeout=timeout):
            saved_assets_list = []
            if os.path.isfile(filepath):
                with open(filepath) as f:
                    saved_assets_list = json.load(f)
            saved_assets_list.extend(self.cache[self.saved_idx:])
            with open(filepath, 'w') as f:
                json.dump(saved_assets_list, f, indent=4)
            self.saved_idx = len(self.cache)
    except filelock.Timeout:
        logger.error('Process to write a list of assets is timeout')
Example #4
Source File: exception_handler.py From renku-python with Apache License 2.0
def main(self, *args, **kwargs):
    """Catch all exceptions."""
    try:
        result = super().main(*args, **kwargs)
        return result
    except filelock.Timeout:
        click.echo((
            click.style(
                'Unable to acquire lock.\n',
                fg='red',
            ) + 'Hint: Please wait for another renku '
            'process to finish and then try again.'
        ))
    except Exception:
        if HAS_SENTRY:
            self._handle_sentry()
        if not (sys.stdin.isatty() and sys.stdout.isatty()):
            raise
        self._handle_github()
Example #5
Source File: app.py From resilient-python-api with MIT License
def run(*args, **kwargs):
    """Main app"""

    # define lock
    # this prevents multiple, identical circuits from running at the same time
    lock = get_lock()

    # The main app component initializes the Resilient services
    global application

    try:
        # attempt to lock file, wait 1 second for lock
        with lock.acquire(timeout=1):
            assert lock.is_locked
            application = App(*args, **kwargs)
            application.run()
    except filelock.Timeout:
        # file is probably already locked
        print("Failed to acquire lock on {0} - "
              "you may have another instance of Resilient Circuits running".format(
                  os.path.abspath(lock.lock_file)))
    except OSError as exc:
        # Some other problem accessing the lockfile
        print("Unable to lock {0}: {1}".format(os.path.abspath(lock.lock_file), exc))
    # finally:
    #     LOG.info("App finished.")
Example #6
Source File: test.py From py-filelock with The Unlicense
def test_timeout(self):
    """
    Tests if the lock raises a Timeout error when it cannot be acquired.
    """
    lock1 = self.LOCK_TYPE(self.LOCK_PATH)
    lock2 = self.LOCK_TYPE(self.LOCK_PATH)

    # Acquire lock 1.
    lock1.acquire()
    self.assertTrue(lock1.is_locked)
    self.assertFalse(lock2.is_locked)

    # Try to acquire lock 2.
    self.assertRaises(filelock.Timeout, lock2.acquire, timeout=1)  # FIXME (Filelock)
    self.assertFalse(lock2.is_locked)
    self.assertTrue(lock1.is_locked)

    # Release lock 1.
    lock1.release()
    self.assertFalse(lock1.is_locked)
    self.assertFalse(lock2.is_locked)
    return None
Example #7
Source File: utils.py From mech with MIT License
def settle_instance(instance_name, obj=None, force=False):
    makedirs(DATA_DIR)
    index_path = os.path.join(DATA_DIR, 'index')
    index_lock = os.path.join(DATA_DIR, 'index.lock')
    try:
        with FileLock(index_lock, timeout=3):
            updated = False
            if os.path.exists(index_path):
                with open(index_path) as fp:
                    instances = json.loads(uncomment(fp.read()))
                # prune nonexistent Mechfiles
                for k in list(instances):
                    instance_data = instances[k]
                    path = instance_data and instance_data.get('path')
                    if not path or not os.path.exists(os.path.join(path, 'Mechfile')):
                        del instances[k]
                        updated = True
            else:
                instances = {}
            instance_data = instances.get(instance_name)
            if not instance_data or force:
                if obj:
                    instance_data = instances[instance_name] = obj
                    updated = True
                else:
                    instance_data = {}
            if updated:
                with open(index_path, 'w') as fp:
                    json.dump(instances, fp, sort_keys=True, indent=2,
                              separators=(',', ': '))
            return instance_data
    except Timeout:
        puts_err(colored.red(textwrap.fill("Couldn't access index, it seems locked.")))
        sys.exit(1)
Example #8
Source File: test.py From py-filelock with The Unlicense
def test_default_timeout(self):
    """
    Test if the default timeout parameter works.
    """
    lock1 = self.LOCK_TYPE(self.LOCK_PATH)
    lock2 = self.LOCK_TYPE(self.LOCK_PATH, timeout=1)
    self.assertEqual(lock2.timeout, 1)

    # Acquire lock 1.
    lock1.acquire()
    self.assertTrue(lock1.is_locked)
    self.assertFalse(lock2.is_locked)

    # Try to acquire lock 2.
    self.assertRaises(filelock.Timeout, lock2.acquire)  # FIXME (SoftFileLock)
    self.assertFalse(lock2.is_locked)
    self.assertTrue(lock1.is_locked)

    lock2.timeout = 0
    self.assertEqual(lock2.timeout, 0)

    self.assertRaises(filelock.Timeout, lock2.acquire)
    self.assertFalse(lock2.is_locked)
    self.assertTrue(lock1.is_locked)

    # Release lock 1.
    lock1.release()
    self.assertFalse(lock1.is_locked)
    self.assertFalse(lock2.is_locked)
    return None
Example #9
Source File: doom_gym.py From sample-factory with MIT License
def _game_init(self, with_locking=True, max_parallel=10):
    lock_file = lock = None
    if with_locking:
        lock_file = doom_lock_file(max_parallel)
        lock = FileLock(lock_file)

    init_attempt = 0
    while True:
        init_attempt += 1
        try:
            if with_locking:
                with lock.acquire(timeout=20):
                    self.game.init()
            else:
                self.game.init()
            break
        except Timeout:
            if with_locking:
                log.debug(
                    'Another process currently holds the lock %s, attempt: %d',
                    lock_file, init_attempt,
                )
        except Exception as exc:
            log.warning('VizDoom game.init() threw an exception %r. Terminate process...', exc)
            from envs.env_utils import EnvCriticalError
            raise EnvCriticalError()
Example #10
Source File: doom_multiagent_wrapper.py From sample-factory with MIT License
def _ensure_initialized(self):
    if self.initialized:
        return

    self.workers = [
        MultiAgentEnvWorker(i, self.make_env_func, self.env_config, reset_on_init=self.reset_on_init)
        for i in range(self.num_agents)
    ]

    init_attempt = 0
    while True:
        init_attempt += 1
        try:
            port_to_use = udp_port_num(self.env_config)
            port = find_available_port(port_to_use, increment=1000)
            log.debug('Using port %d', port)
            init_info = dict(port=port)

            lock_file = doom_lock_file(max_parallel=20)
            lock = FileLock(lock_file)
            with lock.acquire(timeout=10):
                for i, worker in enumerate(self.workers):
                    worker.task_queue.put((init_info, TaskType.INIT))
                    if self.safe_init:
                        time.sleep(1.0)  # just in case
                    else:
                        time.sleep(0.05)

                for i, worker in enumerate(self.workers):
                    worker.result_queue.get(timeout=20)
                    worker.result_queue.task_done()
                    worker.task_queue.join()
        except filelock.Timeout:
            continue
        except Exception:
            raise RuntimeError('Critical error: worker stuck on initialization. Abort!')
        else:
            break

    log.debug('%d agent workers initialized for env %d!', len(self.workers), self.env_config.worker_index)
    self.initialized = True
Example #11
Source File: ray_envs.py From sample-factory with MIT License
def register_doom_envs_rllib(**kwargs):
    """Register env factories in RLLib system."""
    for spec in DOOM_ENVS:
        def make_env_func(env_config):
            print('Creating env!!!')
            cfg = default_cfg(env=spec.name)
            cfg.pixel_format = 'HWC'  # tensorflow models expect HWC by default

            if 'skip_frames' in env_config:
                cfg.env_frameskip = env_config['skip_frames']
            if 'res_w' in env_config:
                cfg.res_w = env_config['res_w']
            if 'res_h' in env_config:
                cfg.res_h = env_config['res_h']
            if 'wide_aspect_ratio' in env_config:
                cfg.wide_aspect_ratio = env_config['wide_aspect_ratio']

            env = make_doom_env(spec.name, env_config=env_config, cfg=cfg, **kwargs)

            # we lock the global mutex here, otherwise Doom instances may crash
            # on first reset when too many of them are reset simultaneously
            lock = FileLock(DOOM_LOCK_PATH)
            attempt = 0
            while True:
                attempt += 1
                try:
                    with lock.acquire(timeout=10):
                        print('Env created, resetting...')
                        env.reset()
                        print('Env reset completed! Config:', env_config)
                        break
                except Timeout:
                    print('Another instance of this application currently holds the lock, attempt:', attempt)

            return env

        register_env(spec.name, make_env_func)
Example #12
Source File: cli.py From scyllabackup with MIT License
def cli_run_with_lock(args=sys.argv[1:]):
    cli = parse_args(args)
    lock = filelock.FileLock(cli.lock)

    log_level = getattr(logging, cli.log_level.upper())
    logger.setLevel(log_level)

    try:
        with lock.acquire(timeout=cli.lock_timeout):
            cli.func(cli)
    except filelock.Timeout:
        logger.info("Another Instance of application already running")
        sys.exit(2)
Example #13
Source File: test_local.py From S4 with GNU General Public License v3.0
def test_lock(self, local_client):
    local_client_2 = local.LocalSyncClient(local_client.path)

    local_client.lock(timeout=0.01)
    with pytest.raises(filelock.Timeout):
        local_client_2.lock(timeout=0.01)
    local_client.unlock()

    # once client 1 releases, client 2 can take the lock
    local_client_2.lock(timeout=0.01)
    local_client_2.unlock()
Example #14
Source File: utils.py From mech with MIT License
def instances():
    makedirs(DATA_DIR)
    index_path = os.path.join(DATA_DIR, 'index')
    index_lock = os.path.join(DATA_DIR, 'index.lock')
    try:
        with FileLock(index_lock, timeout=3):
            updated = False
            if os.path.exists(index_path):
                with open(index_path) as fp:
                    instances = json.loads(uncomment(fp.read()))
                # prune nonexistent Mechfiles
                for k in list(instances):
                    instance_data = instances[k]
                    path = instance_data and instance_data.get('path')
                    if not path or not os.path.exists(os.path.join(path, 'Mechfile')):
                        del instances[k]
                        updated = True
            else:
                instances = {}
            if updated:
                with open(index_path, 'w') as fp:
                    json.dump(instances, fp, sort_keys=True, indent=2,
                              separators=(',', ': '))
            return instances
    except Timeout:
        puts_err(colored.red(textwrap.fill("Couldn't access index, it seems locked.")))
        sys.exit(1)
Example #15
Source File: mirror.py From bandersnatch with Academic Free License v3.0
def _bootstrap(self, flock_timeout: float = 1.0) -> None:
    paths = [
        self.storage_backend.PATH_BACKEND(""),
        self.storage_backend.PATH_BACKEND("web/simple"),
        self.storage_backend.PATH_BACKEND("web/packages"),
        self.storage_backend.PATH_BACKEND("web/local-stats/days"),
    ]
    if self.json_save:
        logger.debug("Adding json directories to bootstrap")
        paths.extend(
            [
                self.storage_backend.PATH_BACKEND("web/json"),
                self.storage_backend.PATH_BACKEND("web/pypi"),
            ]
        )
    for path in paths:
        path = self.homedir / path
        if not path.exists():
            logger.info(f"Setting up mirror directory: {path}")
            path.mkdir(parents=True)

    flock = self.storage_backend.get_lock(str(self.lockfile_path))
    try:
        logger.debug(f"Acquiring FLock with timeout: {flock_timeout!s}")
        with flock.acquire(timeout=flock_timeout):
            self._validate_todo()
            self._load()
    except Timeout:
        logger.error("Flock timed out!")
        raise RuntimeError(
            f"Could not acquire lock on {self.lockfile_path}. "
            + "Another instance could be running?"
        )
Example #16
Source File: app_restartable.py From resilient-python-api with MIT License
def run(*args, **kwargs):
    """Main app"""

    # define lock
    # this prevents multiple, identical circuits from running at the same time
    lock = get_lock()

    # The main app component initializes the Resilient services
    global application

    try:
        # attempt to lock file, wait 1 second for lock
        with lock.acquire(timeout=1):
            assert lock.is_locked
            application = AppRestartable(*args, **kwargs)
            application.run()
    except filelock.Timeout:
        # file is probably already locked
        errmsg = ("Failed to acquire lock on {0} - you may have "
                  "another instance of Resilient Circuits running")
        print(errmsg.format(os.path.abspath(lock.lock_file)))
    except ValueError:
        LOG.exception("ValueError Raised. Application not running.")
    except OSError as exc:
        # Some other problem accessing the lockfile
        print("Unable to lock {0}: {1}".format(os.path.abspath(lock.lock_file), exc))
    # finally:
    #     LOG.info("App finished.")
Example #17
Source File: config.py From minemeld-core with Apache License 2.0
def _config_monitor(config_path):
    api_config_path = os.path.join(config_path, 'api')
    dirsnapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)

    while True:
        try:
            with API_CONFIG_LOCK.acquire(timeout=600):
                new_snapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
                if new_snapshot != dirsnapshot:
                    try:
                        _load_config(config_path)
                        _load_auth_dbs(config_path)
                    except gevent.GreenletExit:
                        break
                    except:
                        LOG.exception('Error loading config')
                    dirsnapshot = new_snapshot
        except filelock.Timeout:
            LOG.error('Timeout locking config in config monitor')

        gevent.sleep(1)

# initialization
Example #18
Source File: lock.py From tox with MIT License
@contextmanager  # restored: hold_lock yields, so it is driven as a context manager
def hold_lock(lock_file, reporter=verbosity1):
    py.path.local(lock_file.dirname).ensure(dir=1)
    lock = FileLock(str(lock_file))
    try:
        try:
            lock.acquire(0.0001)
        except Timeout:
            reporter("lock file {} present, will block until released".format(lock_file))
            lock.acquire()
        yield
    finally:
        lock.release(force=True)
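
A usage sketch under the same assumption (the lock path is hypothetical): callers wrap the critical section directly, blocking until any competing process releases the lock.

with hold_lock(py.path.local("/tmp/tox-test.lock")):  # hypothetical path
    ...  # only one process executes this block at a time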
Example #19
Source File: cli.py From scyllabackup with MIT License
def common_parser():
    parser = configargparse.ArgParser(add_help=False)
    parser.add('-c', '--conf-file', is_config_file=True,
               help='Config file for scyllabackup')
    parser.add('-l', '--log-level', default='WARNING',
               choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
               help='Log level for scyllabackup')
    parser.add('--path', required=True,
               help='Path of scylla data directory')
    parser.add('--db', required=True,
               help='Path of scyllabackup db file. The backup metadata is '
               'stored in this file.')
    parser.add('--provider', required=True, choices=['s3', 'wabs'],
               help='Cloud provider used for storage. It should be one of '
               '`s3` or `wabs`')
    parser.add('--nodetool-path', default='/usr/bin/nodetool',
               help='Path of nodetool utility on filesystem.')
    parser.add('--cqlsh-path', default='/usr/bin/cqlsh',
               help='Path of cqlsh utility on filesystem')
    parser.add('--cqlsh-host', default='127.0.0.1',
               help='Host to use for connecting cqlsh service')
    parser.add('--cqlsh-port', default='9042',
               help='Port to use for connecting cqlsh service')

    s3 = parser.add_argument_group("Required arguments if using "
                                   "'s3' provider")
    s3.add('--s3-bucket-name', metavar='BUCKET_NAME',
           help='Mandatory if provider is s3')
    s3.add('--s3-aws-key', metavar='AWS_KEY',
           help='Mandatory if provider is s3')
    s3.add('--s3-aws-secret', metavar='AWS_SECRET',
           help='Mandatory if provider is s3')

    wabs = parser.add_argument_group("Required arguments if using "
                                     "'wabs' provider")
    wabs.add('--wabs-container-name', help='Mandatory if provider is wabs')
    wabs.add('--wabs-account-name', help='Mandatory if provider is wabs')
    wabs.add('--wabs-sas-token', help='Mandatory if provider is wabs')

    parser.add('--prefix', required=True,
               help='Mandatory prefix to store backups in cloud storage')
    parser.add('--lock', default='/var/run/lock/scyllabackup.lock',
               help='Lock file for scyllabackup.')
    parser.add('--lock-timeout', type=int, default=10,
               help='Timeout for taking lock.')
    parser.add('--max-workers', type=int, default=4,
               help='Sets max workers for parallelizing storage api calls')

    return parser
Example #20
Source File: switch_common.py From power-up with Apache License 2.0
def send_cmd(self, cmd):
    if self.mode == 'passive':
        f = open(self.outfile, 'a+')
        f.write(cmd + '\n')
        f.close()
        return

    host_ip = gethostbyname(self.host)
    lockfile = os.path.join(SWITCH_LOCK_PATH, host_ip + '.lock')
    if not os.path.isfile(lockfile):
        os.mknod(lockfile)
        os.chmod(lockfile, stat.S_IRWXO | stat.S_IRWXG | stat.S_IRWXU)
    lock = FileLock(lockfile)
    cnt = 0
    while cnt < 5 and not lock.is_locked:
        if cnt > 0:
            self.log.info('Waiting to acquire lock for switch {}'.
                          format(self.host))
        cnt += 1
        try:
            lock.acquire(timeout=5, poll_intervall=0.05)  # 5 sec, 50 ms
            sleep(0.01)  # give switch a chance to close out comms
        except Timeout:
            pass
    if lock.is_locked:
        if self.ENABLE_REMOTE_CONFIG:
            cmd = self.ENABLE_REMOTE_CONFIG.format(cmd)
            self.log.debug(cmd)
        ssh = SSH()
        __, data, _ = ssh.exec_cmd(
            self.host,
            self.userid,
            self.password,
            cmd,
            ssh_log=True,
            look_for_keys=False)
        lock.release()
        # sleep 60 ms to give other processes a chance.
        sleep(0.06 + random() / 100)  # lock acquire polls at 50 ms
        if lock.is_locked:
            self.log.error('Lock is locked. Should be unlocked')
        return data.decode("utf-8")
    else:
        self.log.error('Unable to acquire lock for switch {}'.format(self.host))
        raise SwitchException('Unable to acquire lock for switch {}'.
                              format(self.host))
Example #21
Source File: dataset_reader.py From allennlp with Apache License 2.0
def _instance_iterator(self, file_path: str) -> Iterable[Instance]:
    cache_file: Optional[str] = None
    if self._cache_directory:
        cache_file = self._get_cache_location_for_file_path(file_path)

    if cache_file is not None and os.path.exists(cache_file):
        cache_file_lock = FileLock(cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT)
        try:
            cache_file_lock.acquire()
            # We make an assumption here that if we can obtain the lock, no one will
            # be trying to write to the file anymore, so it should be safe to release the lock
            # before reading so that other processes can also read from it.
            cache_file_lock.release()
            logger.info("Reading instances from cache %s", cache_file)
            with open(cache_file) as data_file:
                yield from self._multi_worker_islice(
                    data_file, transform=self.deserialize_instance
                )
        except Timeout:
            logger.warning(
                "Failed to acquire lock on dataset cache file within %d seconds. "
                "Cannot use cache to read instances.",
                self.CACHE_FILE_LOCK_TIMEOUT,
            )
            yield from self._multi_worker_islice(self._read(file_path), ensure_lazy=True)
    elif cache_file is not None and not os.path.exists(cache_file):
        instances = self._multi_worker_islice(self._read(file_path), ensure_lazy=True)
        # The cache file doesn't exist so we'll try writing to it.
        if self.max_instances is not None:
            # But we don't write to the cache when max_instances is specified.
            logger.warning("Skipping writing to data cache since max_instances was specified.")
            yield from instances
        elif util.is_distributed() or (get_worker_info() and get_worker_info().num_workers):
            # We also shouldn't write to the cache if there's more than one process loading
            # instances since each worker only receives a partial share of the instances.
            logger.warning(
                "Can't cache data instances when there are multiple processes loading data"
            )
            yield from instances
        else:
            try:
                with FileLock(cache_file + ".lock", timeout=self.CACHE_FILE_LOCK_TIMEOUT):
                    with CacheFile(cache_file, mode="w+") as cache_handle:
                        logger.info("Caching instances to temp file %s", cache_handle.name)
                        for instance in instances:
                            cache_handle.write(self.serialize_instance(instance) + "\n")
                            yield instance
            except Timeout:
                logger.warning(
                    "Failed to acquire lock on dataset cache file within %d seconds. "
                    "Cannot write to cache.",
                    self.CACHE_FILE_LOCK_TIMEOUT,
                )
                yield from instances
    else:
        # No cache.
        yield from self._multi_worker_islice(self._read(file_path), ensure_lazy=True)