Python load disk
51 Python code examples were found related to "load disk".
Each example is taken from an open-source project; the source file, project, and license are noted above it.
Example 1
Source File: indexer.py From SearchingReddit with MIT License | 6 votes |
def load_from_disk(self, index_dir):
    self.inverted_index = shelve.open(os.path.join(index_dir, "inverted_index"))
    self.forward_index = shelve.open(os.path.join(index_dir, "forward_index"))
    self.url_to_id = shelve.open(os.path.join(index_dir, "url_to_id"))
    self.id_to_url = {v: k for k, v in self.url_to_id.items()}
    # TODO: avgdl and total doc count should be calculated when indexing
    self._doc_count = 0
    """
    total_word_count = 0
    for (docid, text) in self.forward_index.iteritems():
        self._doc_count += 1
        total_word_count += len(text.parsed_text)
    self._avgdl = total_word_count / self._doc_count
    """
    print "LOADED!"

Note: this project targets Python 2, as the bare print statement and iteritems show.
Example 2
Source File: tinychain.py From tinychain with MIT License | 6 votes |
def load_from_disk():
    if not os.path.isfile(CHAIN_PATH):
        return
    try:
        with open(CHAIN_PATH, "rb") as f:
            msg_len = int(binascii.hexlify(f.read(4) or b'\x00'), 16)
            new_blocks = deserialize(f.read(msg_len))
            logger.info(f"loading chain from disk with {len(new_blocks)} blocks")
            for block in new_blocks:
                connect_block(block)
    except Exception:
        logger.exception('load chain failed, starting from genesis')


# UTXO set
# ----------------------------------------------------------------------------
Example 3
Source File: lookups_util.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def load_lookup_file_from_disk(file_path):
    """ parse the lookup file from the given path and return the result

    Args:
        file_path (string): the path to the lookup file

    Returns:
        lookup_data (dict): result from the csv parser
    """
    if not file_exists(file_path):
        raise RuntimeError('Not valid filepath: {}'.format(file_path))

    try:
        with open(file_path, mode='r') as f:
            reader = csv.DictReader(f)
            csv.field_size_limit(CSV_FILESIZE_LIMIT)
            lookup_data = reader.next()
    except Exception as e:
        raise RuntimeError('Error reading model file: %s, %s' % (file_path, str(e)))

    return lookup_data
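The `reader.next()` call is Python 2 API. A minimal Python 3 sketch of the same read-one-row pattern, with the size-limit constant assumed for illustration rather than taken from the project:

import csv

CSV_FILESIZE_LIMIT = 10485760  # assumed value, not from the original source

def load_first_csv_row(file_path):
    """Return the first data row of a CSV file as a dict."""
    csv.field_size_limit(CSV_FILESIZE_LIMIT)
    with open(file_path, mode='r', newline='') as f:
        reader = csv.DictReader(f)
        return next(reader)  # Python 3 replacement for reader.next()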
Example 4
Source File: options.py From randovania with GNU General Public License v3.0 | 6 votes |
def load_from_disk(self, ignore_decode_errors: bool = False) -> bool:
    """
    Loads the file created with `_save_to_disk`.
    :param ignore_decode_errors: If True, errors in the config file are ignored.
    :return: True, if a valid file exists.
    """
    try:
        persisted_data = self._read_persisted_options()
    except json.decoder.JSONDecodeError as e:
        if ignore_decode_errors:
            persisted_data = None
        else:
            raise DecodeFailedException(f"Unable to decode JSON: {e}")

    if persisted_data is None:
        return False

    persisted_options = get_persisted_options_from_data(persisted_data)
    self.load_from_persisted(persisted_options, ignore_decode_errors)
    return True
Example 5
Source File: qt.py From randovania with GNU General Public License v3.0 | 6 votes |
def load_options_from_disk(options: Options) -> bool:
    parent: QWidget = None
    try:
        options.load_from_disk()
        return True
    except DecodeFailedException as decode_failed:
        user_response = QMessageBox.critical(
            parent,
            "Error loading previous settings",
            ("The following error occurred while restoring your settings:\n"
             "{}\n\n"
             "Do you want to reset this part of your settings?").format(decode_failed),
            QMessageBox.Yes | QMessageBox.No,
            QMessageBox.No
        )
        if user_response == QMessageBox.Yes:
            options.load_from_disk(True)
            return True
        else:
            return False
Example 6
Source File: parse_fortran.py From fortran-language-server with MIT License | 6 votes |
def load_from_disk(self):
    """Read file from disk"""
    try:
        if PY3K:
            with open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle:
                contents = re.sub(r'\t', r' ', fhandle.read())
                self.hash = hashlib.md5(contents.encode('utf-8')).hexdigest()
                self.contents_split = contents.splitlines()
        else:
            with io.open(self.path, 'r', encoding='utf-8', errors='replace') as fhandle:
                contents = re.sub(r'\t', r' ', fhandle.read())
                self.hash = hashlib.md5(contents.encode('utf-8')).hexdigest()
                self.contents_split = contents.splitlines()
        self.fixed = detect_fixed_format(self.contents_split)
        self.contents_pp = self.contents_split
        self.nLines = len(self.contents_split)
    except:
        return 'Could not read/decode file'
    else:
        return None
Example 7
Source File: save.py From PADME with MIT License | 6 votes |
def load_from_disk(filename):
    """Load a dataset from file."""
    name = filename
    if os.path.splitext(name)[1] == ".gz":
        name = os.path.splitext(name)[0]
    if os.path.splitext(name)[1] == ".pkl":
        return load_pickle_from_disk(filename)
    elif os.path.splitext(name)[1] == ".joblib":
        try:
            return joblib.load(filename)
        except KeyError:
            # Try older joblib version for legacy files.
            return old_joblib.load(filename)
        except ValueError:
            return old_joblib.load(filename)
    elif os.path.splitext(name)[1] == ".csv":
        # First line of user-specified CSV *must* be header.
        df = pd.read_csv(filename, header=0)
        df = df.replace(np.nan, str(""), regex=True)
        return df
    else:
        raise ValueError("Unrecognized filetype for %s" % filename)
Example 8
Source File: save.py From PADME with MIT License | 6 votes |
def load_cv_dataset_from_disk(save_dir, fold_num):
    assert fold_num > 1
    loaded = False
    train_data = []
    valid_data = []
    for i in range(fold_num):
        fold_dir = os.path.join(save_dir, "fold" + str(i + 1))
        train_dir = os.path.join(fold_dir, "train_dir")
        valid_dir = os.path.join(fold_dir, "valid_dir")
        if not os.path.exists(train_dir) or not os.path.exists(valid_dir):
            return False, None, list()
        train = dcCustom.data.DiskDataset(train_dir)
        valid = dcCustom.data.DiskDataset(valid_dir)
        train_data.append(train)
        valid_data.append(valid)

    loaded = True
    with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
        transformers = pickle.load(f)
        return loaded, list(zip(train_data, valid_data)), transformers
Example 9
Source File: datacube.py From openeo-python-client with Apache License 2.0 | 6 votes |
def load_disk_collection(cls, connection: 'Connection', file_format: str,
                         glob_pattern: str, **options) -> 'DataCube':
    """
    Loads image data from disk as a DataCube.

    :param connection: The connection to use to connect with the backend.
    :param file_format: the file format, e.g. 'GTiff'
    :param glob_pattern: a glob pattern that matches the files to load from disk
    :param options: options specific to the file format
    :return: the data as a DataCube
    """
    pg = PGNode(
        process_id='load_disk_data',
        arguments={
            'format': file_format,
            'glob_pattern': glob_pattern,
            'options': options
        }
    )
    return cls(graph=pg, connection=connection, metadata={})
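Since the method receives `cls` and a `Connection`, a call might look like the following sketch; the backend URL and glob pattern are placeholders, not from the source:

import openeo
from openeo.rest.datacube import DataCube

connection = openeo.connect("https://openeo.example.com")  # placeholder URL
cube = DataCube.load_disk_collection(
    connection,
    file_format="GTiff",
    glob_pattern="/data/tiles/*.tif",  # placeholder pattern
)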
Example 10
Source File: pd_storage.py From Paradrop with Apache License 2.0 | 6 votes |
def loadFromDisk(self):
    """Attempts to load the data from disk.
    Returns True if success, False otherwise."""
    if(pdos.exists(self.filename)):
        deleteFile = False
        try:
            pyld = pickle.load(pdos.open(self.filename, 'rb'))
            self.setAttr(self.importAttr(pyld))
            return True
        except Exception as e:
            out.err('Error loading from disk: %s\n' % (str(e)))
            deleteFile = True

        # Delete the file
        if(deleteFile):
            try:
                pdos.unlink(self.filename)
            except Exception as e:
                out.err('Error unlinking %s\n' % (self.filename))

    return False
Example 11
Source File: save.py From deepchem with MIT License | 6 votes |
def load_from_disk(filename):
    """Load a dataset from file."""
    name = filename
    if os.path.splitext(name)[1] == ".gz":
        name = os.path.splitext(name)[0]
    extension = os.path.splitext(name)[1]
    if extension == ".pkl":
        return load_pickle_from_disk(filename)
    elif extension == ".joblib":
        return joblib.load(filename)
    elif extension == ".csv":
        # First line of user-specified CSV *must* be header.
        df = pd.read_csv(filename, header=0)
        df = df.replace(np.nan, str(""), regex=True)
        return df
    elif extension == ".npy":
        return np.load(filename, allow_pickle=True)
    else:
        raise ValueError("Unrecognized filetype for %s" % filename)
Example 12
Source File: preferences.py From software-boutique with GNU General Public License v3.0 | 5 votes |
def load_from_disk(self):
    """
    Loads configuration from disk. Initialises the folder structure and file if necessary.
    """
    if os.path.exists(self.file_path):
        try:
            with open(self.file_path) as stream:
                self.data = json.load(stream)
        except Exception as e:
            self.dbg.stdout("Read preferences failed! File will be re-created.", self.dbg.error, 1)
            self.init_config()
    else:
        self.init_config()
Example 13
Source File: save.py From PADME with MIT License | 5 votes |
def load_pickle_from_disk(filename):
    """Load dataset from pickle file."""
    if ".gz" in filename:
        with gzip.open(filename, "rb") as f:
            df = pickle.load(f)
    else:
        with open(filename, "rb") as f:
            df = pickle.load(f)
    return df
Example 14
Source File: mapclient_qt.py From CrisisMappingToolkit with Apache License 2.0 | 5 votes |
def LoadCacheFromDisk(self, path):
    '''Read a cache file from disk'''

    def readPickleImage(pImage):
        return Image.fromstring(pImage['mode'], pImage['size'], pImage['pixels'])

    # Load the pickle formatted data
    with open(path, 'rb') as f:
        (pickle_images, TileManager._lru_keys) = pickle.load(f)

    # Unpack images one at a time
    TileManager._images = {}
    for (pImage, key) in zip(pickle_images, TileManager._lru_keys):
        TileManager._images[key] = readPickleImage(pImage)

    print('Loaded '+str(len(TileManager._lru_keys))+' tiles to cache from path: ' + path)
Example 15
Source File: loaders.py From open-solution-data-science-bowl-2018 with MIT License | 5 votes |
def load_from_disk(self, data_source, index, *, filetype, grayscale=False):
    if filetype == 'png':
        img_filepath = data_source[index]
        return self.load_image(img_filepath, grayscale=grayscale)
    elif filetype == 'json':
        json_filepath = data_source[index]
        return self.read_json(json_filepath)
    else:
        raise Exception('files must be png or json')
Example 16
Source File: EvalAgentBase.py From PokerRL with MIT License | 5 votes |
def load_from_disk(cls, path_to_eval_agent):
    state = load_pickle(path=path_to_eval_agent)
    eval_agent = cls(t_prof=state["t_prof"])
    eval_agent.load_state_dict(state=state)
    return eval_agent
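The `cls` parameter suggests this is a classmethod, so a subclass can reconstruct itself from a pickled state file. Roughly, with a placeholder path:

# Hypothetical usage of the classmethod above.
eval_agent = EvalAgentBase.load_from_disk(path_to_eval_agent="/checkpoints/eval_agent.pkl")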
Example 17
Source File: models_util.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def load_algo_options_from_disk(file_path):
    model_data = load_lookup_file_from_disk(file_path)
    algo_name = model_data['algo']
    model_options = json.loads(model_data['options'])

    return algo_name, model_data, model_options
Example 18
Source File: page.py From dactyl with MIT License | 5 votes |
def load_from_disk(self):
    """
    Read the file from the filesystem, as either raw text or as a Jinja
    template, and load frontmatter, if any, either way.
    """
    assert "md" in self.data
    if not self.skip_pp:
        logger.debug("... loading markdown from filesystem")
        path = self.config["content_path"]
        pp_env = self.get_pp_env(loader=FrontMatterFSLoader(path))
        self.pp_template = pp_env.get_template(self.data["md"])
        frontmatter = pp_env.loader.fm_map[self.data["md"]]
        merge_dicts(frontmatter, self.data)
        # special case: let frontmatter overwrite default "html" vals
        if PROVIDED_FILENAME_KEY in self.data and "html" in frontmatter:
            self.data["html"] = frontmatter["html"]
        self.twolines = pp_env.loader.twolines[self.data["md"]]
    else:
        logger.info("... reading markdown from file")
        fullpath = os.path.join(self.config["content_path"], self.data["md"])
        with open(fullpath, "r", encoding="utf-8") as f:
            ftext = f.read()
        self.rawtext, frontmatter = parse_frontmatter(ftext)
        merge_dicts(frontmatter, self.data)
        # special case: let frontmatter overwrite default "html" vals
        if PROVIDED_FILENAME_KEY in self.data and "html" in frontmatter:
            self.data["html"] = frontmatter["html"]
        self.twolines = self.rawtext.split("\n", 2)[:2]
Example 19
Source File: vocab.py From SACN with MIT License | 5 votes |
def load_from_disk(self, name=''):
    if not os.path.exists(self.path + name):
        return False
    timestamp = time.ctime(os.path.getmtime(self.path + name))
    timestamp = datetime.datetime.strptime(timestamp, '%a %b %d %H:%M:%S %Y')
    age_in_hours = (datetime.datetime.now() - timestamp).seconds/60./60.

    log.info('Loading vocab from: {0}'.format(self.path + name))
    self.token2idx, self.idx2token, self.label2idx, self.idx2label = pickle.load(open(self.path, 'rb'))
    if age_in_hours > 12:
        log.info('Vocabulary outdated: {0}'.format(self.path + name))
        return False
    else:
        return True
Example 20
Source File: resources.py From localstripe with GNU General Public License v3.0 | 5 votes |
def try_load_from_disk(self):
    try:
        with open('/tmp/localstripe.pickle', 'rb') as f:
            old = pickle.load(f)
            self.clear()
            self.update(old)
    except FileNotFoundError:
        pass
Example 21
Source File: prepare_model.py From camera-trap-classifier with MIT License | 5 votes |
def load_model_from_disk(path_to_model_on_disk, compile=True):
    """ Load weights from disk and add to model """
    logging.info("Loading model from: %s" % path_to_model_on_disk)
    loaded_model = load_model(
        path_to_model_on_disk,
        compile=compile,
        custom_objects={
            'accuracy': accuracy,
            'top_k_accuracy': top_k_accuracy,
            'masked_loss_function':
                build_masked_loss(K.sparse_categorical_crossentropy)})
    return loaded_model
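Passing `custom_objects` is how Keras resolves custom metrics and losses during deserialization. Calling the helper might look like this sketch; the path is a placeholder:

# Hypothetical usage; compile=False skips restoring the optimizer state.
model = load_model_from_disk("/models/classifier.hdf5", compile=False)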
Example 22
Source File: util.py From openprotein with MIT License | 5 votes |
def load_model_from_disk(path, force_cpu=True):
    if force_cpu:
        # load model with map_location set to storage (main mem)
        model = torch.load(path, map_location=lambda storage, loc: storage)
        # flattern parameters in memory
        model.flatten_parameters()
        # update internal state accordingly
        model.use_gpu = False
    else:
        # load model using default map_location
        model = torch.load(path)
        model.flatten_parameters()
    return model


# Constants
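The `map_location=lambda storage, loc: storage` idiom keeps every deserialized tensor in main memory, so a checkpoint saved on a GPU machine loads on a CPU-only one. PyTorch also accepts a string shorthand; a sketch with a placeholder path:

import torch

# Equivalent CPU-only load using the string form of map_location.
model = torch.load("/models/checkpoint.pt", map_location="cpu")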
Example 23
Source File: tm_util.py From openprotein with MIT License | 5 votes |
def load_data_from_disk(filename, partition_rotation=0):
    print("Loading data from disk...")
    data = parse_datafile_from_disk(filename)
    data_unzipped = list(zip(*data))
    partitions = calculate_partitions(
        cluster_partitions=torch.LongTensor(np.array(data_unzipped[4])),
        types=torch.LongTensor(np.array(data_unzipped[3])),
        partitions_count=5)
    train_set = []
    val_set = []
    test_set = []
    for idx, sample in enumerate(data):
        partition = int(partitions[idx])  # in range 0-4
        rotated = (partition + partition_rotation) % 5
        if int(rotated) <= 2:
            train_set.append(sample)
        elif int(rotated) == 3:
            val_set.append(sample)
        else:
            test_set.append(sample)
    print("Data splited as:", len(train_set), "train set", len(val_set),
          "validation set", len(test_set), "test set")
    return train_set, val_set, test_set
Example 24
Source File: loader.py From dcscn-super-resolution with MIT License | 5 votes |
def load_batch_image_from_disk(self, image_number):
    image_number = image_number % self.count
    input_image = self.load_input_batch_image(image_number)
    input_interpolated = self.load_interpolated_batch_image(image_number)
    true = self.load_true_batch_image(image_number)
    return input_image, input_interpolated, true
Example 25
Source File: segmenter.py From PartiallyReversibleUnet with BSD 3-Clause "New" or "Revised" License | 5 votes |
def loadFromDisk(self, id, epoch):
    path = self._getCheckpointPathLoad(id, epoch)
    checkpoint = torch.load(path)
    self.expConfig.net.load_state_dict(checkpoint["net_state_dict"])

    # load optimizer: hack necessary because load_state_dict has bugs
    # (See https://github.com/pytorch/pytorch/issues/2830#issuecomment-336194949)
    self.expConfig.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    for state in self.expConfig.optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                if torch.cuda.is_available():
                    state[k] = v.cuda()
                else:
                    state[k] = v

    if "lr_sheudler_state_dict" in checkpoint:
        self.expConfig.lr_sheudler.load_state_dict(checkpoint["lr_sheudler_state_dict"])
        # Hack lr sheudle
        # self.expConfig.lr_sheudler.milestones = [250, 400, 550]

    # load best epoch score (if available)
    if "bestMeanDice" in checkpoint:
        self.bestMeanDice = checkpoint["bestMeanDice"]
        self.bestMeanDiceEpoch = checkpoint["bestMeanDiceEpoch"]

    # load moving avg if available
    if "movingAvg" in checkpoint:
        self.movingAvg = checkpoint["movingAvg"]

    # load best moving avg epoch if available
    if "bestMovingAvgEpoch" in checkpoint:
        self.bestMovingAvgEpoch = checkpoint["bestMovingAvgEpoch"]
    if "bestMovingAvg" in checkpoint:
        self.bestMovingAvg = checkpoint["bestMovingAvg"]

    return checkpoint["epoch"]
Example 26
Source File: storage.py From rednotebook with GNU General Public License v2.0 | 5 votes |
def load_all_months_from_disk(data_dir):
    """
    Load all months and return a directory mapping year-month values
    to month objects.
    """
    months = {}
    logging.debug('Starting to load files in dir "%s"' % data_dir)
    for path, year_number, month_number in get_journal_files(data_dir):
        month = _load_month_from_disk(path, year_number, month_number)
        if month:
            months[format_year_and_month(year_number, month_number)] = month
    logging.debug('Finished loading files in dir "%s"' % data_dir)
    return months
Example 27
Source File: gpr.py From material_recommender with GNU General Public License v3.0 | 5 votes |
def load_from_disk():
    path = tempfile.gettempdir()
    path = os.path.join(path, 'material_recommender')
    if not os.path.isdir(path):
        os.mkdir(path)
    gprobj_path = os.path.join(path, 'gpr.obj')
    if not os.path.isfile(gprobj_path):
        return
    with open(gprobj_path, 'rb') as gprobj:
        gpr_loaded = pickle.load(gprobj)
    return gpr_loaded
Example 28
Source File: utils.py From magpie with MIT License | 5 votes |
def load_from_disk(path_to_disk):
    """ Load a pickle from disk to memory """
    if not os.path.exists(path_to_disk):
        raise ValueError("File " + path_to_disk + " does not exist")

    return pickle.load(open(path_to_disk, 'rb'))
Example 29
Source File: kvESX.py From vsphere-storage-for-docker with Apache License 2.0 | 5 votes |
def load_disk_lib(lib_name):
    """
    Load the disk lib API library
    """
    global lib
    if not lib:
        lib = CDLL(lib_name)
        lib.DiskLib_Init.argtypes = []
        lib.DiskLib_Init.restype = c_bool
        lib.DiskLib_Init()
    return
Example 30
Source File: collectdReportMetrics.py From InsightAgent with Apache License 2.0 | 5 votes |
def calculate_disk_load_values(all_latest_timestamps, each_file, filenames, new_prev_endtime_epoch_l,
                               raw_data_l, start_time_epoch_l, date_l):
    try:
        csvfile = open(os.path.join(csvpath, each_file + date_l))
        reader = csv.reader(csvfile)
        for row in reader:
            if reader.line_num > 1:
                if long(int(float(row[0]))) < long(start_time_epoch_l):
                    continue
                timestamp_str = str(int(float(row[0])))
                new_prev_endtime_epoch_l = long(timestamp_str) * 1000.0
                if timestamp_str in raw_data_l:
                    value_list = raw_data_l[timestamp_str]
                    value_list[filenames[each_file][0]] = row[1]
                    if ("disk" in each_file) or ("interface" in each_file):
                        value_list[filenames[each_file][1]] = row[2]
                    elif "load" in each_file:
                        value_list[filenames[each_file][1]] = row[2]
                        value_list[filenames[each_file][2]] = row[3]
                    raw_data_l[timestamp_str] = value_list
                else:
                    value_list = {filenames[each_file][0]: row[1]}
                    if ("disk" in each_file) or ("interface" in each_file):
                        value_list[filenames[each_file][1]] = row[2]
                    elif "load" in each_file:
                        value_list[filenames[each_file][1]] = row[2]
                        value_list[filenames[each_file][2]] = row[3]
                    raw_data_l[timestamp_str] = value_list
                all_latest_timestamps.append(new_prev_endtime_epoch_l)
    except IOError:
        pass
    return new_prev_endtime_epoch_l
Example 31
Source File: save.py From PADME with MIT License | 5 votes |
def load_dataset_from_disk(save_dir):
    """
    Parameters
    ----------
    save_dir: str

    Returns
    -------
    loaded: bool
        Whether the load succeeded
    all_dataset: (dc.data.Dataset, dc.data.Dataset, dc.data.Dataset)
        The train, valid, test datasets
    transformers: list of dc.trans.Transformer
        The transformers used for this dataset
    """
    train_dir = os.path.join(save_dir, "train_dir")
    valid_dir = os.path.join(save_dir, "valid_dir")
    test_dir = os.path.join(save_dir, "test_dir")
    if not os.path.exists(train_dir) or not os.path.exists(
            valid_dir) or not os.path.exists(test_dir):
        return False, None, list()
    loaded = True
    train = dcCustom.data.DiskDataset(train_dir)
    valid = dcCustom.data.DiskDataset(valid_dir)
    test = dcCustom.data.DiskDataset(test_dir)
    all_dataset = (train, valid, test)
    with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
        transformers = pickle.load(f)
        return loaded, all_dataset, transformers
Example 32
Source File: loaders.py From open-solution-ship-detection with MIT License | 5 votes |
def load_from_disk(self, data_source, index, *, filetype, grayscale=False):
    if filetype == 'png':
        img_filepath = data_source[index]
        return self.load_image(img_filepath, grayscale=grayscale)
    elif filetype == 'json':
        json_filepath = data_source[index]
        return self.read_json(json_filepath)
    elif filetype == 'joblib':
        img_filepath = data_source[index]
        return self.load_joblib(img_filepath)
    else:
        raise Exception('files must be png or json or joblib')
Example 33
Source File: util.py From IRCLogParser with GNU General Public License v3.0 | 5 votes |
def load_from_disk(file_name):
    """
    A function to load any data structure from a file using pickle module

    :param file_name: name of the file to be used for saving the data
    :return: data structure that exists in the file
    """
    fileObject = open(file_name, 'r')  # note: text mode is Python 2 only; Python 3 needs 'rb'
    data = pickle.load(fileObject)
    fileObject.close()
    return data
Example 34
Source File: utils.py From char-cnn-text-classification-tensorflow with Apache License 2.0 | 5 votes |
def load_data_from_disk():
    # Load dataset from file
    dataset = pandas.read_csv('./data/reviews.csv', encoding='utf-8',
                              names=['comments', 'label'])

    # Split by words
    X = [clean_str(sentence) for sentence in dataset['comments']]
    X = [list(sentence) for sentence in X]

    Y = [[0, 1] if (label == 'positive') else [1, 0] for label in dataset['label']]

    return [X, Y]
Example 35
Source File: data.py From fanci with GNU General Public License v3.0 | 5 votes |
def load_from_disk(self):
    """
    Load a workspace from disk.
    :return:
    """
    log.info('Loading workspace from disk. This may take a while...')
    with open(settings.WORKSPACE_FILE, 'rb') as f:
        tmp_dict = pickle.load(f)
    self.__dict__.update(tmp_dict)
    log.info('Finished loading')
Example 36
Source File: prep_data.py From recipe-summarization with MIT License | 5 votes |
def load_images_disk(shape):
    """Load preprocessed images and associated keys from disk."""
    images = np.load(_get_npy_filename(shape))
    with open(_get_filename_filename(shape), 'rb') as f:
        filenames = pickle.load(f)
    print('Loaded {:,} preprocessed images from disk'.format(images.shape[0]))
    return {f: i for f, i in zip(filenames, images)}
Example 37
Source File: save.py From deepchem with MIT License | 5 votes |
def load_dataset_from_disk(save_dir):
    """
    Parameters
    ----------
    save_dir: str

    Returns
    -------
    loaded: bool
        Whether the load succeeded
    all_dataset: (dc.data.Dataset, dc.data.Dataset, dc.data.Dataset)
        The train, valid, test datasets
    transformers: list of dc.trans.Transformer
        The transformers used for this dataset
    """
    train_dir = os.path.join(save_dir, "train_dir")
    valid_dir = os.path.join(save_dir, "valid_dir")
    test_dir = os.path.join(save_dir, "test_dir")
    if not os.path.exists(train_dir) or not os.path.exists(
            valid_dir) or not os.path.exists(test_dir):
        return False, None, list()
    loaded = True
    train = deepchem.data.DiskDataset(train_dir)
    valid = deepchem.data.DiskDataset(valid_dir)
    test = deepchem.data.DiskDataset(test_dir)
    train.memory_cache_size = 40 * (1 << 20)  # 40 MB
    all_dataset = (train, valid, test)
    with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
        transformers = pickle.load(f)
        return loaded, all_dataset, transformers
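The three-part return value unpacks naturally at the call site; a sketch with a placeholder directory:

# Hypothetical usage of the loader above.
loaded, all_dataset, transformers = load_dataset_from_disk("/data/my_dataset")
if loaded:
    train, valid, test = all_dataset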
Example 38
Source File: _pickle_states.py From kepler-cfhp with MIT License | 5 votes |
def load_good_bloom_gadgets_from_disk(self):
    if not os.path.isfile('good_bloom_gadget_2nd.cache'):
        with open('good_bloom_gadget.cache', 'rb') as f:
            print('[+] loading good bloom gadget')
            self.good_bloom_gadget = pickle.load(f)
    else:
        with open('good_bloom_gadget_2nd.cache', 'rb') as f:
            print('[+] loading double bloomed good bloom state')
            self.good_bloom_gadget = pickle.load(f)
Example 39
Source File: zstack.py From indy-plenum with Apache License 2.0 | 5 votes |
def loadSecKeyFromDisk(directory, name):
    filePath = os.path.join(directory, "{}.key_secret".format(name))
    try:
        _, secret = zmq.auth.load_certificate(filePath)
        return secret
    except (ValueError, IOError) as ex:
        raise KeyError from ex
Example 40
Source File: zstack.py From indy-plenum with Apache License 2.0 | 5 votes |
def loadPubKeyFromDisk(directory, name):
    filePath = os.path.join(directory, "{}.key".format(name))
    try:
        public, _ = zmq.auth.load_certificate(filePath)
        return public
    except (ValueError, IOError) as ex:
        raise KeyError from ex
Example 41
Source File: loader.py From GTDWeb with GNU General Public License v2.0 | 4 votes |
def load_disk(self):
    """
    Loads the migrations from all INSTALLED_APPS from disk.
    """
    self.disk_migrations = {}
    self.unmigrated_apps = set()
    self.migrated_apps = set()
    for app_config in apps.get_app_configs():
        # Get the migrations module directory
        module_name = self.migrations_module(app_config.label)
        was_loaded = module_name in sys.modules
        try:
            module = import_module(module_name)
        except ImportError as e:
            # I hate doing this, but I don't want to squash other import errors.
            # Might be better to try a directory check directly.
            if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
                self.unmigrated_apps.add(app_config.label)
                continue
            raise
        else:
            # PY3 will happily import empty dirs as namespaces.
            if not hasattr(module, '__file__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Module is not a package (e.g. migrations.py).
            if not hasattr(module, '__path__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Force a reload if it's already loaded (tests need this)
            if was_loaded:
                six.moves.reload_module(module)
        self.migrated_apps.add(app_config.label)
        directory = os.path.dirname(module.__file__)
        # Scan for .py files
        migration_names = set()
        for name in os.listdir(directory):
            if name.endswith(".py"):
                import_name = name.rsplit(".", 1)[0]
                if import_name[0] not in "_.~":
                    migration_names.add(import_name)
        # Load them
        south_style_migrations = False
        for migration_name in migration_names:
            try:
                migration_module = import_module("%s.%s" % (module_name, migration_name))
            except ImportError as e:
                # Ignore South import errors, as we're triggering them
                if "south" in str(e).lower():
                    south_style_migrations = True
                    break
                raise
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationError(
                    "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                )
            # Ignore South-style migrations
            if hasattr(migration_module.Migration, "forwards"):
                south_style_migrations = True
                break
            self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
        if south_style_migrations:
            self.unmigrated_apps.add(app_config.label)
Example 42
Source File: _load_environment.py From dm_memorytasks with Apache License 2.0 | 4 votes |
def load_from_disk(path, settings):
    """Load Memory Tasks from disk.

    Args:
        path: Directory containing dm_memorytasks environment.
        settings: EnvironmentSettings required to start the environment.

    Returns:
        An implementation of dm_env.Environment.

    Raises:
        RuntimeError: If unable to start environment process.
    """
    _validate_environment_settings(settings)

    executable_path = os.path.join(path, 'Linux64Player')
    libosmesa_path = os.path.join(path, 'external_libosmesa_llvmpipe.so')
    if not os.path.exists(executable_path) or not os.path.exists(libosmesa_path):
        raise RuntimeError(
            'Cannot find dm_memorytasks executable or dependent files at path: {}'
            .format(path))

    port = portpicker.pick_unused_port()

    process_flags = [
        executable_path,
        # Unity command-line flags.
        '-logfile',
        '-batchmode',
        '-noaudio',
        # Other command-line flags.
        '--logtostderr',
        '--server_type=DM_ENV_RPC',
        '--uri_address=[::]:{}'.format(port),
    ]

    os.environ.update({
        'UNITY_RENDERER': 'software',
        'UNITY_OSMESA_PATH': libosmesa_path,
    })

    process = subprocess.Popen(
        process_flags, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if process.poll() is not None:
        raise RuntimeError('Failed to start dm_memorytasks process correctly.')

    return _MemoryTasksProcessEnv(
        _connect_to_environment(port, settings),
        _MEMORY_TASK_OBSERVATIONS,
        settings.num_action_repeats,
        process)
Example 43
Source File: loader.py From bioforum with MIT License | 4 votes |
def load_disk(self):
    """Load the migrations from all INSTALLED_APPS from disk."""
    self.disk_migrations = {}
    self.unmigrated_apps = set()
    self.migrated_apps = set()
    for app_config in apps.get_app_configs():
        # Get the migrations module directory
        module_name, explicit = self.migrations_module(app_config.label)
        if module_name is None:
            self.unmigrated_apps.add(app_config.label)
            continue
        was_loaded = module_name in sys.modules
        try:
            module = import_module(module_name)
        except ImportError as e:
            # I hate doing this, but I don't want to squash other import errors.
            # Might be better to try a directory check directly.
            if ((explicit and self.ignore_no_migrations) or (
                    not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
                self.unmigrated_apps.add(app_config.label)
                continue
            raise
        else:
            # PY3 will happily import empty dirs as namespaces.
            if not hasattr(module, '__file__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Module is not a package (e.g. migrations.py).
            if not hasattr(module, '__path__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Force a reload if it's already loaded (tests need this)
            if was_loaded:
                reload(module)
        self.migrated_apps.add(app_config.label)
        directory = os.path.dirname(module.__file__)
        # Scan for .py files
        migration_names = set()
        for name in os.listdir(directory):
            if name.endswith(".py"):
                import_name = name.rsplit(".", 1)[0]
                if import_name[0] not in "_.~":
                    migration_names.add(import_name)
        # Load them
        for migration_name in migration_names:
            migration_module = import_module("%s.%s" % (module_name, migration_name))
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationError(
                    "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                )
            self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                migration_name,
                app_config.label,
            )
Example 44
Source File: loader.py From python2017 with MIT License | 4 votes |
def load_disk(self):
    """
    Loads the migrations from all INSTALLED_APPS from disk.
    """
    self.disk_migrations = {}
    self.unmigrated_apps = set()
    self.migrated_apps = set()
    for app_config in apps.get_app_configs():
        # Get the migrations module directory
        module_name, explicit = self.migrations_module(app_config.label)
        if module_name is None:
            self.unmigrated_apps.add(app_config.label)
            continue
        was_loaded = module_name in sys.modules
        try:
            module = import_module(module_name)
        except ImportError as e:
            # I hate doing this, but I don't want to squash other import errors.
            # Might be better to try a directory check directly.
            if ((explicit and self.ignore_no_migrations) or (
                    not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
                self.unmigrated_apps.add(app_config.label)
                continue
            raise
        else:
            # PY3 will happily import empty dirs as namespaces.
            if not hasattr(module, '__file__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Module is not a package (e.g. migrations.py).
            if not hasattr(module, '__path__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Force a reload if it's already loaded (tests need this)
            if was_loaded:
                six.moves.reload_module(module)
        self.migrated_apps.add(app_config.label)
        directory = os.path.dirname(module.__file__)
        # Scan for .py files
        migration_names = set()
        for name in os.listdir(directory):
            if name.endswith(".py"):
                import_name = name.rsplit(".", 1)[0]
                if import_name[0] not in "_.~":
                    migration_names.add(import_name)
        # Load them
        for migration_name in migration_names:
            migration_module = import_module("%s.%s" % (module_name, migration_name))
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationError(
                    "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                )
            self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                migration_name,
                app_config.label,
            )
Example 45
Source File: loader.py From openhgsenti with Apache License 2.0 | 4 votes |
def load_disk(self):
    """
    Loads the migrations from all INSTALLED_APPS from disk.
    """
    self.disk_migrations = {}
    self.unmigrated_apps = set()
    self.migrated_apps = set()
    for app_config in apps.get_app_configs():
        # Get the migrations module directory
        module_name = self.migrations_module(app_config.label)
        if module_name is None:
            self.unmigrated_apps.add(app_config.label)
            continue
        was_loaded = module_name in sys.modules
        try:
            module = import_module(module_name)
        except ImportError as e:
            # I hate doing this, but I don't want to squash other import errors.
            # Might be better to try a directory check directly.
            if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
                self.unmigrated_apps.add(app_config.label)
                continue
            raise
        else:
            # PY3 will happily import empty dirs as namespaces.
            if not hasattr(module, '__file__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Module is not a package (e.g. migrations.py).
            if not hasattr(module, '__path__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Force a reload if it's already loaded (tests need this)
            if was_loaded:
                six.moves.reload_module(module)
        self.migrated_apps.add(app_config.label)
        directory = os.path.dirname(module.__file__)
        # Scan for .py files
        migration_names = set()
        for name in os.listdir(directory):
            if name.endswith(".py"):
                import_name = name.rsplit(".", 1)[0]
                if import_name[0] not in "_.~":
                    migration_names.add(import_name)
        # Load them
        for migration_name in migration_names:
            migration_module = import_module("%s.%s" % (module_name, migration_name))
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationError(
                    "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                )
            self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                migration_name,
                app_config.label,
            )
Example 46
Source File: loader.py From Hands-On-Application-Development-with-PyCharm with MIT License | 4 votes |
def load_disk(self):
    """Load the migrations from all INSTALLED_APPS from disk."""
    self.disk_migrations = {}
    self.unmigrated_apps = set()
    self.migrated_apps = set()
    for app_config in apps.get_app_configs():
        # Get the migrations module directory
        module_name, explicit = self.migrations_module(app_config.label)
        if module_name is None:
            self.unmigrated_apps.add(app_config.label)
            continue
        was_loaded = module_name in sys.modules
        try:
            module = import_module(module_name)
        except ImportError as e:
            # I hate doing this, but I don't want to squash other import errors.
            # Might be better to try a directory check directly.
            if ((explicit and self.ignore_no_migrations) or (
                    not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
                self.unmigrated_apps.add(app_config.label)
                continue
            raise
        else:
            # Empty directories are namespaces.
            # getattr() needed on PY36 and older (replace w/attribute access).
            if getattr(module, '__file__', None) is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            # Module is not a package (e.g. migrations.py).
            if not hasattr(module, '__path__'):
                self.unmigrated_apps.add(app_config.label)
                continue
            # Force a reload if it's already loaded (tests need this)
            if was_loaded:
                reload(module)
        self.migrated_apps.add(app_config.label)
        migration_names = {
            name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
            if not is_pkg and name[0] not in '_~'
        }
        # Load migrations
        for migration_name in migration_names:
            migration_path = '%s.%s' % (module_name, migration_name)
            try:
                migration_module = import_module(migration_path)
            except ImportError as e:
                if 'bad magic number' in str(e):
                    raise ImportError(
                        "Couldn't import %r as it appears to be a stale "
                        ".pyc file." % migration_path
                    ) from e
                else:
                    raise
            if not hasattr(migration_module, "Migration"):
                raise BadMigrationError(
                    "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                )
            self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                migration_name,
                app_config.label,
            )