Python filelock.FileLock() Examples
The following are 30 code examples of filelock.FileLock(), collected from open-source projects. Each example notes the project, source file, and license it comes from. You may also want to check out the other available functions and classes of the filelock module.
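All of the examples below follow the same basic pattern: create a FileLock on a sidecar .lock file and hold it around a critical section so that only one process at a time runs it. As a quick orientation, here is a minimal sketch of that pattern; the paths are illustrative only, not taken from any of the projects listed.

import filelock

lock = filelock.FileLock('/tmp/example.txt.lock')  # the lock file sits beside the protected file
try:
    with lock.acquire(timeout=10):  # raises filelock.Timeout if not acquired in time
        with open('/tmp/example.txt', 'a') as f:
            f.write('only one process at a time gets here\n')
except filelock.Timeout:
    print('another process is holding the lock')

FileLock is itself a context manager, so a bare "with filelock.FileLock(path):" also works when the default blocking behaviour (no timeout) is acceptable.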
Example #1
Source File: mnist_env.py From HardRLWithYoutube with MIT License
def __init__(self, seed=0, episode_len=None, no_images=None):
    from tensorflow.examples.tutorials.mnist import input_data

    # we could use a temporary directory for this with a context manager and
    # TemporaryDirectory, but then each test that uses mnist would re-download the data
    # this way the data is not cleaned up, but we only download it once per machine
    mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data')
    with filelock.FileLock(mnist_path + '.lock'):
        self.mnist = input_data.read_data_sets(mnist_path)

    self.np_random = np.random.RandomState()
    self.np_random.seed(seed)

    self.observation_space = Box(low=0.0, high=1.0, shape=(28, 28, 1))
    self.action_space = Discrete(10)
    self.episode_len = episode_len
    self.time = 0
    self.no_images = no_images

    self.train_mode()
    self.reset()
Example #2
Source File: state.py From cccatalog-api with MIT License
def worker_finished(worker_ip):
    """
    The scheduler received a notification indicating an indexing worker
    has finished its task.

    :param worker_ip: The private IP of the worker.
    :return: The target index if all workers are finished, else False.
    """
    with FileLock('lock'), shelve.open('db', writeback=True) as db:
        try:
            _ = db['worker_statuses'][worker_ip]
            db['worker_statuses'][worker_ip] = WorkerStatus.FINISHED
            log.info(f'Received worker_finished signal from {worker_ip}')
        except KeyError:
            log.error(
                'An indexer worker notified us it finished its task, but '
                'we are not tracking it.'
            )
        for worker_key in db['worker_statuses']:
            if db['worker_statuses'][worker_key] == WorkerStatus.RUNNING:
                log.info(f'{worker_key} is still indexing')
                return False
        return db['target_index']
Example #3
Source File: mnist_env.py From baselines with MIT License
def __init__(self, episode_len=None, no_images=None):
    import filelock
    from tensorflow.examples.tutorials.mnist import input_data

    # we could use a temporary directory for this with a context manager and
    # TemporaryDirectory, but then each test that uses mnist would re-download the data
    # this way the data is not cleaned up, but we only download it once per machine
    mnist_path = osp.join(tempfile.gettempdir(), 'MNIST_data')
    with filelock.FileLock(mnist_path + '.lock'):
        self.mnist = input_data.read_data_sets(mnist_path)

    self.np_random = np.random.RandomState()

    self.observation_space = Box(low=0.0, high=1.0, shape=(28, 28, 1))
    self.action_space = Discrete(10)
    self.episode_len = episode_len
    self.time = 0
    self.no_images = no_images

    self.train_mode()
    self.reset()
Example #4
Source File: local.py From cloudstorage with MIT License
def lock_local_file(path: str) -> filelock.FileLock:
    """Platform dependent file lock.

    :param path: File or directory path to lock.
    :type path: str

    :yield: File lock context manager.
    :yield type: :class:`filelock.FileLock`

    :raise CloudStorageError: If lock could not be acquired.
    """
    # NOTE: this function yields, so in the original source it is presumably
    # wrapped with contextlib.contextmanager (the decorator is not captured
    # in this snippet).
    lock = filelock.FileLock(path + ".lock")

    try:
        lock.acquire(timeout=0.1)
    except filelock.Timeout:
        raise CloudStorageError("Lock timeout")

    yield lock

    if lock.is_locked:
        lock.release()

    if os.path.exists(lock.lock_file):
        os.remove(lock.lock_file)
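Because lock_local_file yields the lock, it is meant to be driven by a with statement. A hypothetical call site, with an illustrative path and helper, might look like:

with lock_local_file('/var/data/container/blob.bin') as lock:
    # blob.bin.lock is held here; it is released and removed when the block exits
    process_blob('/var/data/container/blob.bin')  # hypothetical helper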
Example #5
Source File: plot_parameter_server.py From ray with Apache License 2.0
def get_data_loader():
    """Safely downloads data. Returns training/validation set dataloader."""
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    # We add FileLock here because multiple workers will want to
    # download data, and this may cause overwrites since
    # DataLoader is not threadsafe.
    with FileLock(os.path.expanduser("~/data.lock")):
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                "~/data",
                train=True,
                download=True,
                transform=mnist_transforms),
            batch_size=128,
            shuffle=True)

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST("~/data", train=False, transform=mnist_transforms),
            batch_size=128,
            shuffle=True)
    return train_loader, test_loader
Example #6
Source File: extensionsapi.py From minemeld-core with Apache License 2.0
def _load_frozen_paths():
    library_directory = config.get('MINEMELD_LOCAL_LIBRARY_PATH', None)
    if library_directory is None:
        LOG.error('freeze not updated - MINEMELD_LOCAL_LIBRARY_PATH not set')
        return

    freeze_path = os.path.join(library_directory, 'freeze.txt')
    if not os.path.isfile(freeze_path):
        LOG.info('Extensions frigidaire not found, paths not loaded')
        return

    freeze_lock = filelock.FileLock('{}.lock'.format(freeze_path))
    with freeze_lock.acquire(timeout=30):
        with open(freeze_path, 'r') as ff:
            minemeld.extensions.load_frozen_paths(ff)
Example #7
Source File: configdataapi.py From minemeld-core with Apache License 2.0
def read(self):
    fdfname = self.datafilename + '.yml'
    lockfname = os.path.join(self.cpath, fdfname + '.lock')
    lock = filelock.FileLock(lockfname)

    if fdfname not in os.listdir(self.cpath):
        return jsonify(error={
            'message': 'Unknown config data file'
        }), 400

    try:
        with lock.acquire(timeout=10):
            with open(os.path.join(self.cpath, fdfname), 'r') as f:
                result = yaml.safe_load(f)
    except Exception as e:
        return jsonify(error={
            'message': 'Error loading config data file: %s' % str(e)
        }), 500

    return jsonify(result=result)
Example #8
Source File: configdataapi.py From minemeld-core with Apache License 2.0
def create(self):
    tdir = os.path.dirname(os.path.join(self.cpath, self.datafilename))
    if not os.path.samefile(self.cpath, tdir):
        return jsonify(error={'msg': 'Wrong config data filename'}), 400

    fdfname = os.path.join(self.cpath, self.datafilename + '.yml')
    lockfname = fdfname + '.lock'
    lock = filelock.FileLock(lockfname)

    try:
        body = request.get_json()
    except Exception as e:
        return jsonify(error={'message': str(e)}), 400

    try:
        with lock.acquire(timeout=10):
            with open(fdfname, 'w') as f:
                yaml.safe_dump(body, stream=f)
    except Exception as e:
        return jsonify(error={
            'message': str(e)
        }), 500
Example #9
Source File: config.py From minemeld-core with Apache License 2.0
def init():
    global API_CONFIG_PATH
    global API_CONFIG_LOCK

    config_path = os.environ.get('MM_CONFIG', None)
    if config_path is None:
        LOG.critical('MM_CONFIG environment variable not set')
        raise RuntimeError('MM_CONFIG environment variable not set')

    if not os.path.isdir(config_path):
        config_path = os.path.dirname(config_path)

    # init global vars
    API_CONFIG_PATH = os.path.join(config_path, 'api')
    API_CONFIG_LOCK = filelock.FileLock(
        os.environ.get('API_CONFIG_LOCK', '/var/run/minemeld/api-config.lock')
    )

    _load_config(config_path)
    _load_auth_dbs(config_path)

    if config_path is not None:
        gevent.spawn(_config_monitor, config_path)
Example #10
Source File: mnist_pytorch.py From ray with Apache License 2.0
def get_data_loaders():
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    # We add FileLock here because multiple workers will want to
    # download data, and this may cause overwrites since
    # DataLoader is not threadsafe.
    with FileLock(os.path.expanduser("~/data.lock")):
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                "~/data",
                train=True,
                download=True,
                transform=mnist_transforms),
            batch_size=64,
            shuffle=True)

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST("~/data", train=False, transform=mnist_transforms),
            batch_size=64,
            shuffle=True)
    return train_loader, test_loader
Example #11
Source File: summary.py From chainerui with MIT License
def save(self, out, timeout):
    filepath = os.path.join(out, self.filename)
    lockpath = filepath + '.lock'

    try:
        with filelock.FileLock(lockpath, timeout=timeout):
            saved_assets_list = []
            if os.path.isfile(filepath):
                with open(filepath) as f:
                    saved_assets_list = json.load(f)
            saved_assets_list.extend(self.cache[self.saved_idx:])
            with open(filepath, 'w') as f:
                json.dump(saved_assets_list, f, indent=4)
            self.saved_idx = len(self.cache)
    except filelock.Timeout:
        logger.error('Process to write a list of assets is timeout')
Example #12
Source File: pbt_dcgan_mnist.py From ray with Apache License 2.0
def setup(self, config):
    use_cuda = config.get("use_gpu") and torch.cuda.is_available()
    self.device = torch.device("cuda" if use_cuda else "cpu")
    self.netD = Discriminator().to(self.device)
    self.netD.apply(weights_init)
    self.netG = Generator().to(self.device)
    self.netG.apply(weights_init)
    self.criterion = nn.BCELoss()
    self.optimizerD = optim.Adam(
        self.netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
    self.optimizerG = optim.Adam(
        self.netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
    with FileLock(os.path.expanduser("~/.data.lock")):
        self.dataloader = get_data_loader()
Example #13
Source File: conftest.py From pipelines with Apache License 2.0
def experiment_id(kfp_client, tmp_path_factory, worker_id):
    if not worker_id:
        return get_experiment_id(kfp_client)

    # Locking taken as an example from
    # https://github.com/pytest-dev/pytest-xdist#making-session-scoped-fixtures-execute-only-once

    # get the temp directory shared by all workers
    root_tmp_dir = tmp_path_factory.getbasetemp().parent

    fn = root_tmp_dir / "experiment_id"
    with FileLock(str(fn) + ".lock"):
        if fn.is_file():
            data = fn.read_text()
        else:
            data = get_experiment_id(kfp_client)
            fn.write_text(data)
    return data
Example #14
Source File: voc_utils.py From chainercv with MIT License
def get_voc(year, split):
    if year not in urls:
        raise ValueError
    key = year

    if split == 'test' and year == '2007':
        key = '2007_test'

    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'voc.lock')):
        data_root = download.get_dataset_directory(root)
        base_path = os.path.join(data_root, 'VOCdevkit/VOC{}'.format(year))
        split_file = os.path.join(
            base_path, 'ImageSets/Main/{}.txt'.format(split))
        if os.path.exists(split_file):
            # skip downloading
            return base_path

        download_file_path = utils.cached_download(urls[key])
        ext = os.path.splitext(urls[key])[1]
        utils.extractall(download_file_path, data_root, ext)
    return base_path
Example #15
Source File: sbd_utils.py From chainercv with MIT License
def get_sbd():
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'sbd.lock')):
        data_root = download.get_dataset_directory(root)
        base_path = os.path.join(data_root, 'benchmark_RELEASE/dataset')

        train_voc2012_file = os.path.join(base_path, 'train_voc2012.txt')
        if os.path.exists(train_voc2012_file):
            # skip downloading
            return base_path

        download_file_path = utils.cached_download(url)
        ext = os.path.splitext(url)[1]
        utils.extractall(download_file_path, data_root, ext)

        six.moves.urllib.request.urlretrieve(
            train_voc2012_url, train_voc2012_file)
        _generate_voc2012_txt(base_path)

    return base_path
Example #16
Source File: app.py From resilient-python-api with MIT License
def get_lock():
    """Create a filelock"""
    # The run() method uses a file lock in the user's ~/.resilient directory to
    # prevent multiple instances of resilient circuits running. You can override
    # the lockfile name (and so allow multiple instances) by setting
    # APP_LOCK_FILE in the environment.
    app_lock_file = os.environ.get("APP_LOCK_FILE", "")

    if not app_lock_file:
        lockfile = os.path.expanduser(os.path.join("~", ".resilient",
                                                   "resilient_circuits_lockfile"))
        resilient_dir = os.path.dirname(lockfile)
        if not os.path.exists(resilient_dir):
            os.makedirs(resilient_dir)
    else:
        lockfile = os.path.expanduser(app_lock_file)
    lock = filelock.FileLock(lockfile)
    return lock
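get_lock only constructs the FileLock; the caller still has to acquire it. A sketch of how the single-instance guard might then be applied (run_app is a hypothetical entry point, not part of the project):

lock = get_lock()
try:
    with lock.acquire(timeout=0):  # give up immediately if another instance holds the lock
        run_app()  # hypothetical main entry point
except filelock.Timeout:
    print('resilient-circuits appears to be running already')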
Example #17
Source File: doom_multiagent_wrapper.py From sample-factory with MIT License
def _ensure_initialized(self):
    if self.initialized:
        return

    self.workers = [
        MultiAgentEnvWorker(i, self.make_env_func, self.env_config, reset_on_init=self.reset_on_init)
        for i in range(self.num_agents)
    ]

    init_attempt = 0
    while True:
        init_attempt += 1
        try:
            port_to_use = udp_port_num(self.env_config)
            port = find_available_port(port_to_use, increment=1000)
            log.debug('Using port %d', port)
            init_info = dict(port=port)

            lock_file = doom_lock_file(max_parallel=20)
            lock = FileLock(lock_file)
            with lock.acquire(timeout=10):
                for i, worker in enumerate(self.workers):
                    worker.task_queue.put((init_info, TaskType.INIT))
                    if self.safe_init:
                        time.sleep(1.0)  # just in case
                    else:
                        time.sleep(0.05)

                for i, worker in enumerate(self.workers):
                    worker.result_queue.get(timeout=20)
                    worker.result_queue.task_done()
                    worker.task_queue.join()
        except filelock.Timeout:
            continue
        except Exception:
            raise RuntimeError('Critical error: worker stuck on initialization. Abort!')
        else:
            break

    log.debug('%d agent workers initialized for env %d!', len(self.workers), self.env_config.worker_index)
    self.initialized = True
Example #18
Source File: utils.py From mech with MIT License
def instances():
    makedirs(DATA_DIR)
    index_path = os.path.join(DATA_DIR, 'index')
    index_lock = os.path.join(DATA_DIR, 'index.lock')
    try:
        with FileLock(index_lock, timeout=3):
            updated = False
            if os.path.exists(index_path):
                with open(index_path) as fp:
                    instances = json.loads(uncomment(fp.read()))
                # prune nonexistent Mechfiles
                for k in list(instances):
                    instance_data = instances[k]
                    path = instance_data and instance_data.get('path')
                    if not path or not os.path.exists(os.path.join(path, 'Mechfile')):
                        del instances[k]
                        updated = True
            else:
                instances = {}
            if updated:
                with open(index_path, 'w') as fp:
                    json.dump(instances, fp, sort_keys=True, indent=2, separators=(',', ': '))
            return instances
    except Timeout:
        puts_err(colored.red(textwrap.fill("Couldn't access index, it seems locked.")))
        sys.exit(1)
Example #19
Source File: state.py From cccatalog-api with MIT License
def clear_state():
    """ Forget about all running index jobs. Use with care. """
    with FileLock('lock'), shelve.open('db', writeback=True) as db:
        for key in db:
            log.info('Deleting ' + str(db[key]))
            del db[key]
        log.info('Cleared indexing state.')
Example #20
Source File: plot_hyperparameter.py From ray with Apache License 2.0
def get_data_loaders(batch_size):
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    # We add FileLock here because multiple workers will want to
    # download data, and this may cause overwrites since
    # DataLoader is not threadsafe.
    with FileLock(os.path.expanduser("~/data.lock")):
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                "~/data",
                train=True,
                download=True,
                transform=mnist_transforms),
            batch_size=batch_size,
            shuffle=True)

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST("~/data", train=False, transform=mnist_transforms),
            batch_size=batch_size,
            shuffle=True)
    return train_loader, test_loader


#######################################################################
# Setup: Defining the Neural Network
# ----------------------------------
#
# We define a small neural network to use in training. In addition,
# we created methods to train and test this neural network.
Example #21
Source File: filesystem.py From bandersnatch with Academic Free License v3.0
def get_lock(self, path: Optional[str] = None) -> filelock.FileLock:
    """
    Retrieve the appropriate `FileLock` backend for this storage plugin

    :param str path: The path to use for locking
    :return: A `FileLock` backend for obtaining locks
    :rtype: SwiftFileLock
    """
    if path is None:
        path = self.mirror_base_path.joinpath(self.flock_path).as_posix()
    logger.debug(f"Retrieving FileLock instance @ {path}")
    return filelock.FileLock(path)
Example #22
Source File: tpu_lm_finetuning.py From ru_transformers with Apache License 2.0
def __init__(self, tokenizer, file_path='train', args=None, shuffle=True):
    self.args = args
    if not hasattr(tokenizer, 'hash'):
        tokenizer.hash = ''

    log_info(f"Loading features from {file_path}")
    if os.path.isfile(file_path):
        files = [file_path]
    else:
        assert os.path.isdir(file_path)
        files = glob.glob(os.path.join(file_path, '*.txt'))

    files = sorted(files)
    if shuffle:
        random.shuffle(files)

    # The dataset can be big, like 230G big. Also, if you train on TPU you need a copy for each of 8 cores.
    # That is why we take a sample and then do resampling each args.reload_data_file epochs.
    # Even if dataset isn't that big it's still good because there is a random shift during dataloading. You can
    # consider it as a data augmentation technique.
    # In case of TPU, you need to make sure initial random seed is the same for each process or TPU will freeze because of
    # different datasets.
    files = files[:1000]
    self.examples = []

    # with FileLock('first_time.lock'):
    for fn in tqdm(files, disable=not xm.is_master_ordinal()):
        self.examples.extend(self.process_file(fn, tokenizer, args.block_size, shuffle))

    # num of batches as multiples of 8
    # only for train
    if shuffle:
        mult = 8 * args.train_batch_size * xm.xrt_world_size()
        new_len = len(self.examples) // mult * mult
        random.shuffle(self.examples)
        self.examples = self.examples[:new_len]
Example #23
Source File: cub_utils.py From chainercv with MIT License
def get_cub_prob_map():
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'cub.lock')):
        data_root = download.get_dataset_directory(root)
        base_path = os.path.join(data_root, 'segmentations')
        if os.path.exists(base_path):
            # skip downloading
            return base_path

        prob_map_download_file_path = utils.cached_download(prob_map_url)
        prob_map_ext = os.path.splitext(prob_map_url)[1]
        utils.extractall(
            prob_map_download_file_path, data_root, prob_map_ext)
    return base_path
Example #24
Source File: cub_utils.py From chainercv with MIT License
def get_cub():
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'cub.lock')):
        data_root = download.get_dataset_directory(root)
        base_path = os.path.join(data_root, 'CUB_200_2011')
        if os.path.exists(base_path):
            # skip downloading
            return base_path

        download_file_path = utils.cached_download(url)
        ext = os.path.splitext(url)[1]
        utils.extractall(download_file_path, data_root, ext)
    return base_path
Example #25
Source File: ade20k_utils.py From chainercv with MIT License
def get_ade20k(root, url):
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'ade20k.lock')):
        data_root = download.get_dataset_directory(root)
        if os.path.exists(os.path.join(data_root, 'ADEChallengeData2016')):
            return data_root
        cache_path = utils.cached_download(url)
        utils.extractall(cache_path, data_root, os.path.splitext(url)[1])
    return data_root
Example #26
Source File: sqlite.py From chocolate with BSD 3-Clause "New" or "Revised" License
def __init__(self, url, result_table="results", complementary_table="complementary", space_table="space"):
    super(SQLiteConnection, self).__init__()

    if url.endswith("/"):
        raise RuntimeError("Empty database name {}".format(url))

    if url.endswith((" ", "\t")):
        raise RuntimeError("Database name ends with space {}".format(url))

    if not url.startswith("sqlite://"):
        raise RuntimeError("Missing 'sqlite:///' at the begin of url".format(url))

    if url == "sqlite://" or url == "sqlite:///:memory:":
        raise RuntimeError("Cannot use memory database as it exists only for the time of the connection")

    match = re.search("sqlite:///(.*)", url)
    if match is not None:
        db_path = match.group(1)
    else:
        raise RuntimeError("Cannot find sqlite db path in {}".format(url))

    self.url = url
    self.result_table_name = result_table
    self.complementary_table_name = complementary_table
    self.space_table_name = space_table

    self._lock = filelock.FileLock("{}.lock".format(db_path))
    self.hold_lock = False

    # with self.lock():
    #     db = dataset.connect(self.url)

    #     # Initialize a result table and ensure float for loss
    #     results = db[self.result_table_name]
    #     results.create_column("_loss", sqlalchemy.Float)
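The commented-out block above refers to a self.lock() helper. A hypothetical implementation of such a helper around self._lock, assuming it is also meant to maintain the hold_lock flag, might look like this sketch (not the project's actual method):

from contextlib import contextmanager

@contextmanager
def lock(self, timeout=-1):
    # Hold the sidecar .lock file for the duration of the block.
    with self._lock.acquire(timeout=timeout):
        self.hold_lock = True
        try:
            yield
        finally:
            self.hold_lock = False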
Example #27
Source File: lock.py From guildai with Apache License 2.0
def Lock(name, timeout=-1, guild_home=None):
    guild_home = guild_home or config.guild_home()
    locks_dir = os.path.join(guild_home, "locks")
    util.ensure_dir(locks_dir)
    lock_path = os.path.join(locks_dir, name)
    return filelock.FileLock(lock_path, timeout)
Example #28
Source File: camvid_dataset.py From chainercv with MIT License
def get_camvid():
    # To support ChainerMN, the target directory should be locked.
    with filelock.FileLock(os.path.join(download.get_dataset_directory(
            'pfnet/chainercv/.lock'), 'camvid.lock')):
        data_root = download.get_dataset_directory(root)
        download_file_path = utils.cached_download(url)
        if len(glob.glob(os.path.join(data_root, '*'))) != 9:
            utils.extractall(
                download_file_path, data_root, os.path.splitext(url)[1])
        data_dir = os.path.join(data_root, 'SegNet-Tutorial-master/CamVid')
        if os.path.exists(data_dir):
            for fn in glob.glob(os.path.join(data_dir, '*')):
                shutil.move(fn, os.path.join(data_root, os.path.basename(fn)))
            shutil.rmtree(os.path.dirname(data_dir))
    return data_root
Example #29
Source File: __init__.py From pullbox with MIT License
def main():
    parser = argparse.ArgumentParser(description='Pullbox')
    parser.add_argument('path', help='Path to data directory')
    parser.add_argument('server', help='IP/Domain name of backup server')
    parser.add_argument('--standard-suffix', action='store_true',
                        help='Makes Pullbox use the standard .git suffix for bare git repos (server side only)')
    parser.add_argument('--log', default=LOG_DEFAULT_FNAME,
                        help='Name of log file')
    parser.add_argument('--log-level', default='WARNING',
                        help='Logging level as picked from the logging module')
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--lock-file', default=DEFAULT_LOCK_FILE,
                        help='Lock file to prevent multiple instances from running')
    args = parser.parse_args()

    lock = filelock.FileLock(args.lock_file)
    try:
        with lock.acquire(timeout=0):
            log = init_logger(args.log, args.log_level, quiet=args.quiet)
            p = Pullbox(args.server, args.path, log, args.standard_suffix)
            p.start()
    except (SystemExit, KeyboardInterrupt):
        sys.exit(1)
    except Exception, e:  # NOTE: Python 2 except syntax, as in the original project
        log = logging.getLogger('')
        log.exception('exiting process because of exception')
        print >> sys.stderr, str(e)  # NOTE: Python 2 print-to-stderr
        sys.exit(1)
Example #30
Source File: subscription.py From oneview-redfish-toolkit with Apache License 2.0
def _get_file_lock():
    lock_path = _all_subscription_file() + '.lock'
    lock = FileLock(lock_path)
    return lock