Python fasteners.InterProcessLock() Examples
The following are 28 code examples of fasteners.InterProcessLock(), collected from open-source projects. The source file, project, and license are noted above each example.
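fasteners.InterProcessLock serializes access across processes by locking a file on disk, and the lock object works both as a context manager and through explicit acquire()/release() calls. Before the examples, here is a minimal usage sketch of both styles (the lock-file path and do_exclusive_work() are illustrative, not part of any project below):

import fasteners

# Blocking style: the lock is acquired on entry and released on exit.
with fasteners.InterProcessLock('/tmp/myapp.lock'):
    do_exclusive_work()

# Non-blocking style: acquire() returns False immediately if another
# process already holds the lock.
lock = fasteners.InterProcessLock('/tmp/myapp.lock')
if lock.acquire(blocking=False):
    try:
        do_exclusive_work()
    finally:
        lock.release()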
Example #1
Source File: thread_read_write.py From Auto-PyTorch with Apache License 2.0
def update_results_thread(filename, info):
    thread_lock.acquire()
    with fasteners.InterProcessLock('{0}.lock'.format(filename)):
        content = json.loads(read(filename))
        name = info['name']
        result = info['result']
        refit_config = info['refit_config']
        text = info['text']
        seed = str(info['seed'])

        infos = content[name] if name in content else dict()
        infos[seed] = {'result': result, 'description': text, 'refit': refit_config}
        content[name] = infos

        write(filename, json.dumps(content, indent=4, sort_keys=True))
    thread_lock.release()
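Note the pairing of two locks in this example: the InterProcessLock file lock only guards against other processes, so the module-level thread_lock is still needed to serialize threads within the same process. A minimal sketch of the same pattern as a reusable helper (the helper name and the update_fn callback are illustrative, not part of Auto-PyTorch):

import threading

import fasteners

_thread_lock = threading.Lock()


def locked_update(filename, update_fn):
    # Take the in-process lock first, then the cross-process file lock.
    with _thread_lock, fasteners.InterProcessLock(filename + '.lock'):
        update_fn(filename)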
Example #2
Source File: targets.py From sos with BSD 3-Clause "New" or "Revised" License
def lock(self):
    if not self.sig_id:
        return
    # we will need to lock on a file that we do not really write to
    # otherwise the lock will be broken when we write to it.
    self._lock = fasteners.InterProcessLock(
        os.path.join(env.temp_dir, self.sig_id + '.lock'))
    if not self._lock.acquire(blocking=False):
        self._lock = None
        raise UnavailableLock(
            (self.input_files, self.output_files,
             os.path.join(env.temp_dir, self.sig_id + '.lock')))
    else:
        env.log_to_file(
            'TARGET',
            f'Lock acquired for output files {short_repr(self.output_files)}')
Example #3
Source File: gpu_scheduler.py From clgen with GNU General Public License v3.0
def GetDefaultScheduler() -> GpuScheduler:
    gpus = GPUtil.getGPUs()
    if not gpus:
        raise NoGpuAvailable("No GPUs available")
    if os.environ.get("TEST_TARGET") and os.environ.get("TEST_WITH_GPU") != "1":
        raise NoGpuAvailable("GPUs disabled for tests")
    app.Log(
        2, "Creating default scheduler for %s", humanize.Plural(len(gpus), "GPU")
    )
    return GpuScheduler(
        {gpu: fasteners.InterProcessLock(_LOCK_DIR / str(gpu.id)) for gpu in gpus}
    )


# This function is memoized since we can always acquire the same lock twice.
Example #4
Source File: platform_database.py From mbed-os-tools with Apache License 2.0
def _update_db(self):
    if self._prim_db:
        lock = InterProcessLock("%s.lock" % self._prim_db)
        acquired = lock.acquire(blocking=False)
        if not acquired:
            logger.debug("Waiting 60 seconds for file lock")
            acquired = lock.acquire(blocking=True, timeout=60)
        if acquired:
            try:
                with open(self._prim_db, "w", encoding="utf-8") as out:
                    out.write(unicode(json.dumps(self._dbs[self._prim_db])))
                return True
            finally:
                lock.release()
        else:
            logger.error(
                "Could not update platform database: "
                "Lock acquire failed after 60 seconds")
            return False
    else:
        logger.error(
            "Can't update platform database: destination database is ambiguous")
        return False
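Note the two-step acquire: a fast non-blocking attempt is tried first, and only on contention does the code log a message and fall back to a blocking attempt with a 60-second timeout, so the common uncontended case returns immediately.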
Example #5
Source File: util.py From elevation with Apache License 2.0
def ensure_setup(root, folders=(), file_templates=(), force=False, **kwargs):
    with fasteners.InterProcessLock(os.path.join(root, FOLDER_LOCKFILE_NAME)):
        created_folders = []
        for path in [root] + [os.path.join(root, p) for p in folders]:
            if not os.path.exists(path):
                os.makedirs(path)
                created_folders.append(path)

        created_files = collections.OrderedDict()
        for relpath, template in collections.OrderedDict(file_templates).items():
            path = os.path.join(root, relpath)
            if force or not os.path.exists(path):
                body = template.format(**kwargs)
                with open(path, 'w') as file:
                    file.write(body)
                created_files[path] = body

        return created_folders, created_files
Example #6
Source File: lockutils.py From oslo.concurrency with Apache License 2.0
def external_lock(name, lock_file_prefix=None, lock_path=None):
    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
    return InterProcessLock(lock_file_path)
Example #7
Source File: thread_read_write.py From Auto-PyTorch with Apache License 2.0
def append(filename, content):
    with fasteners.InterProcessLock('{0}.lock'.format(filename)):
        with open(filename, 'a+') as f:
            f.write(content)
Example #8
Source File: tasks.py From sos with BSD 3-Clause "New" or "Revised" License
def check_tasks(tasks, is_all: bool):
    if not tasks:
        return {}
    cache_file: str = os.path.join(
        os.path.expanduser('~'), '.sos', 'tasks', 'status_cache.pickle')
    #
    status_cache: Dict = {}
    if os.path.isfile(cache_file):
        try:
            with fasteners.InterProcessLock(cache_file + '_'):
                with open(cache_file, 'rb') as cache:
                    status_cache = pickle.load(cache)
        except Exception:
            # if the cache file is corrupted, remove it. #1275
            os.remove(cache_file)
    # at most 20 threads
    from multiprocessing.pool import ThreadPool as Pool
    p = Pool(min(20, len(tasks)))
    # the result can be {} for unchanged, or real results
    raw_status = p.starmap(check_task,
                           [(x, status_cache.get(x, {})) for x in tasks])
    # if check all, we clear the cache and record all existing tasks
    has_changes: bool = any(x for x in raw_status)
    if has_changes:
        if is_all:
            status_cache = {
                k: v if v else status_cache[k]
                for k, v in zip(tasks, raw_status)
            }
        else:
            status_cache.update(
                {k: v for k, v in zip(tasks, raw_status) if v})
        with fasteners.InterProcessLock(cache_file + '_'):
            with open(cache_file, 'wb') as cache:
                pickle.dump(status_cache, cache)
    return status_cache
Example #9
Source File: tasks.py From sos with BSD 3-Clause "New" or "Revised" License
def _set_params(self, params):
    params_block = lzma.compress(pickle.dumps(params))
    #env.logger.error(f'updating {self.task_id} params of size {len(params_block)}')
    with fasteners.InterProcessLock(
            os.path.join(env.temp_dir, self.task_id + '.lck')):
        with open(self.task_file, 'r+b') as fh:
            header = self._read_header(fh)
            if len(params_block) == header.params_size:
                fh.seek(self.header_size, 0)
                fh.write(params_block)
            else:
                fh.read(header.params_size)
                runtime = fh.read(header.runtime_size)
                shell = fh.read(header.shell_size)
                pulse = fh.read(header.pulse_size)
                stdout = fh.read(header.stdout_size)
                stderr = fh.read(header.stderr_size)
                result = fh.read(header.result_size)
                signature = fh.read(header.signature_size)
                header = header._replace(params_size=len(params_block))
                self._write_header(fh, header)
                fh.write(params_block)
                if runtime:
                    fh.write(runtime)
                if shell:
                    fh.write(shell)
                if pulse:
                    fh.write(pulse)
                if stdout:
                    fh.write(stdout)
                if stderr:
                    fh.write(stderr)
                if result:
                    fh.write(result)
                if signature:
                    fh.write(signature)
                fh.truncate(self.header_size + header.params_size +
                            header.runtime_size + header.shell_size +
                            header.pulse_size + header.stdout_size +
                            header.stderr_size + header.result_size +
                            header.signature_size)
Example #10
Source File: tasks.py From sos with BSD 3-Clause "New" or "Revised" License
def reset(self):
    # remove result, input, output etc and set the status of the task to new
    with fasteners.InterProcessLock(
            os.path.join(env.temp_dir, self.task_id + '.lck')):
        with open(self.task_file, 'r+b') as fh:
            self._reset(fh)
Example #11
Source File: dag.py From sos with BSD 3-Clause "New" or "Revised" License
def find_executable(self):
    '''Find an executable node, that is, a node that has not been completed
    and has no unfinished input dependency.'''
    if 'DAG' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
        env.log_to_file('DAG', 'find_executable')
    for node in self.nodes():
        # if it has not been executed
        if node._status is None:
            with_dependency = False
            for edge in self.in_edges(node):
                if edge[0]._status != 'completed':
                    with_dependency = True
                    break
            if not with_dependency:
                return node
    # if no node could be found, try the pending ones
    pending_jobs = [
        x for x in self.nodes() if x._status == 'signature_pending'
    ]
    if pending_jobs:
        try:
            notifier = ActivityNotifier(
                f'Waiting for {len(pending_jobs)} pending job{"s: e.g." if len(pending_jobs) > 1 else ":"} '
                f'output {short_repr(pending_jobs[0]._signature[0])} with signature file '
                f'{pending_jobs[0]._signature[1] + "_"}. You can manually remove this lock file '
                f'if you are certain that no other process is working on the output.')
            while True:
                for node in pending_jobs:
                    # try a non-blocking acquire on the signature lock file
                    lock = fasteners.InterProcessLock(node._signature[1] + '_')
                    if lock.acquire(blocking=False):
                        lock.release()
                        node._status = None
                        return node
                time.sleep(0.1)
        except Exception as e:
            env.logger.error(str(e))
        finally:
            notifier.stop()
    return None
Example #12
Source File: utils.py From sos with BSD 3-Clause "New" or "Revised" License
def transcribe(text, cmd=None):
    if cmd is not None:
        text = '{}:\n{}'.format(cmd, ' ' + text.replace('\n', '\n ') + '\n')
    with fasteners.InterProcessLock(
            os.path.join(env.temp_dir, 'transcript.lck')):
        with open(os.path.join(env.exec_dir, '.sos', 'transcript.txt'),
                  'a') as trans:
            trans.write(text)
Example #13
Source File: multiprocess_file_storage.py From jarvis with GNU General Public License v2.0
def __init__(self, filename):
    self._file = None
    self._filename = filename
    self._process_lock = fasteners.InterProcessLock(
        '{0}.lock'.format(filename))
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._credentials = {}
Example #14
Source File: util.py From elevation with Apache License 2.0
def lock_vrt(datasource_root, product):
    with fasteners.InterProcessLock(
            os.path.join(datasource_root, product + '.vrt.lock')):
        yield
Example #15
Source File: util.py From elevation with Apache License 2.0
def lock_tiles(datasource_root, tile_names):
    locks = []
    for tile_name in tile_names:
        lockfile_name = os.path.join(datasource_root, 'cache',
                                     tile_name + '.lock')
        locks.append(fasteners.InterProcessLock(lockfile_name))
    for lock in locks:
        lock.acquire(blocking=True)
    yield
    for lock in locks:
        lock.release()
Example #16
Source File: logdir_helpers.py From imgcomp-cvpr with GNU General Public License v3.0
def _mkdir_threadsafe_unique(log_dir_root, log_date, postfix_dir_name):
    os.makedirs(log_dir_root, exist_ok=True)
    # Make sure only one process at a time writes into log_dir_root
    with fasteners.InterProcessLock(os.path.join(log_dir_root, 'lock')):
        return _mkdir_unique(log_dir_root, log_date, postfix_dir_name)
Example #17
Source File: shell.py From gilt with MIT License
def overlay(ctx):  # pragma: no cover
    """Install gilt dependencies"""
    args = ctx.obj.get("args")
    filename = args.get("config")
    debug = args.get("debug")
    _setup(filename)
    for c in config.config(filename):
        with fasteners.InterProcessLock(c.lock_file):
            util.print_info("{}:".format(c.name))
            if not os.path.exists(c.src):
                git.clone(c.name, c.git, c.src, debug=debug)
            if c.dst:
                git.extract(c.src, c.dst, c.version, debug=debug)
                post_commands = {c.dst: c.post_commands}
            else:
                git.overlay(c.src, c.files, c.version, debug=debug)
                post_commands = {
                    conf.dst: conf.post_commands for conf in c.files
                }
            # Run post commands if any.
            for dst, commands in post_commands.items():
                for command in commands:
                    msg = " - running `{}` in {}".format(command, dst)
                    util.print_info(msg)
                    cmd = util.build_sh_cmd(command, cwd=dst)
                    util.run_command(cmd, debug=debug)
Example #18
Source File: train.py From imgcomp-cvpr with GNU General Public License v3.0
def _write_to_sheets(log_date, ae_config_rel_path, pc_config_rel_path,
                     description, git_ref, log_dir_root, is_continue):
    try:
        with fasteners.InterProcessLock(sheets_logger.get_lock_file_p()):
            sheets_logger.insert_row(
                log_date + ('c' if is_continue else ''),
                os.environ.get('JOB_ID', 'N/A'),
                ae_config_rel_path, pc_config_rel_path,
                description, '', git_ref, log_dir_root)
    except sheets_logger.GoogleSheetsAccessFailedException as e:
        print(e)
Example #19
Source File: logdir_helpers.py From L3C-PyTorch with GNU General Public License v3.0
def _mkdir_threadsafe_unique(log_dir_root, log_date, postfix_dir_name):
    os.makedirs(log_dir_root, exist_ok=True)
    # Make sure only one process at a time writes into log_dir_root
    with fasteners.InterProcessLock(os.path.join(log_dir_root, 'lock')):
        return _mkdir_unique(log_dir_root, log_date, postfix_dir_name)
Example #20
Source File: multiscale_tester.py From L3C-PyTorch with GNU General Public License v3.0
def _acquire_lock(self):
    with fasteners.InterProcessLock(self.lock_file):
        yield
Example #21
Source File: tasks.py From OasisPlatform with BSD 3-Clause "New" or "Revised" License
def get_lock():
    lock = fasteners.InterProcessLock(settings.get('worker', 'LOCK_FILE'))
    gotten = lock.acquire(blocking=False,
                          timeout=settings.getfloat('worker', 'LOCK_TIMEOUT_IN_SECS'))
    yield gotten
    if gotten:
        lock.release()
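This generator is presumably wrapped with contextlib.contextmanager in the original module; used that way, the caller receives the result of the non-blocking acquire and can branch on it (a hedged usage sketch; run_exclusive_task is illustrative):

with get_lock() as gotten:
    if gotten:
        run_exclusive_task()
    else:
        pass  # another worker holds the lock; skip this cycle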
Example #22
Source File: multiprocess_file_storage.py From aqua-monitor with GNU Lesser General Public License v3.0
def __init__(self, filename):
    self._file = None
    self._filename = filename
    self._process_lock = fasteners.InterProcessLock(
        '{0}.lock'.format(filename))
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._credentials = {}
Example #23
Source File: utils.py From cloudify-manager with Apache License 2.0
def storage_file_lock(storage_file_path):
    with fasteners.InterProcessLock('{0}.lock'.format(storage_file_path)):
        yield
Example #24
Source File: __init__.py From magnitude with MIT License
def _setup_for_mmap(self):
    # Setup variables for get_vectors_mmap()
    self._all_vectors = None
    self._approx_index = None
    if not self.memory_db:
        self.db_hash = fast_md5_file(self.path)
    else:
        self.db_hash = self.uid
    self.md5 = hashlib.md5(",".join([
        self.path, self.db_hash,
        str(self.length), str(self.dim),
        str(self.precision), str(self.case_insensitive)
    ]).encode('utf-8')).hexdigest()
    self.path_to_mmap = os.path.join(tempfile.gettempdir(),
                                     self.md5 + '.magmmap')
    self.path_to_approx_mmap = os.path.join(tempfile.gettempdir(),
                                            self.md5 + '.approx.magmmap')
    if self.path_to_mmap not in Magnitude.MMAP_THREAD_LOCK:
        Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap] = threading.Lock()
    if self.path_to_approx_mmap not in Magnitude.MMAP_THREAD_LOCK:
        Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap] = \
            threading.Lock()
    self.MMAP_THREAD_LOCK = Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap]
    self.MMAP_PROCESS_LOCK = InterProcessLock(self.path_to_mmap + '.lock')
    self.APPROX_MMAP_THREAD_LOCK = \
        Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap]
    self.APPROX_MMAP_PROCESS_LOCK = \
        InterProcessLock(self.path_to_approx_mmap + '.lock')
    self.setup_for_mmap = True
Example #25
Source File: multiprocess_file_storage.py From alfred-gmail with MIT License
def __init__(self, filename):
    self._file = None
    self._filename = filename
    self._process_lock = fasteners.InterProcessLock(
        '{0}.lock'.format(filename))
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._credentials = {}
Example #26
Source File: utils.py From storops with Apache License 2.0
def lock(self):
    return fasteners.InterProcessLock(self.lock_file_name)
Example #27
Source File: test_process_executor_concurrent_modifications.py From incubator-ariatosca with Apache License 2.0
def _concurrent_update(lock_files, node, key, first_value, second_value,
                       holder_path):
    holder = helpers.FilesystemDataHolder(holder_path)
    locker1 = fasteners.InterProcessLock(lock_files[0])
    locker2 = fasteners.InterProcessLock(lock_files[1])
    first = locker1.acquire(blocking=False)
    if first:
        # Give chance for both processes to acquire locks
        while locker2.acquire(blocking=False):
            locker2.release()
            time.sleep(0.1)
    else:
        locker2.acquire()
    node.attributes[key] = first_value if first else second_value
    holder['key'] = first_value if first else second_value
    holder.setdefault('invocations', 0)
    holder['invocations'] += 1
    if first:
        locker1.release()
    else:
        with locker1:
            locker2.release()
    return first
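The first process to win locker1 spins on locker2, repeatedly acquiring and releasing it, until the second process (which blocks on locker2 after failing to take locker1) has claimed it; this guarantees that both processes are alive and contending before either one writes its value.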
Example #28
Source File: impl_dir.py From taskflow with Apache License 2.0
def _path_lock(self, path):
    lockfile = self._join_path(path, 'lock')
    with fasteners.InterProcessLock(lockfile) as lock:
        with _storagefailure_wrapper():
            yield lock