Python tensorflow.keras.utils.get_file() Examples
The following are four code examples of tensorflow.keras.utils.get_file(), collected from open-source projects. Each example lists its source file, project, and license. You may also want to check out the other functions and classes available in the tensorflow.keras.utils module.
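As a quick orientation before the examples: get_file() downloads a file from a URL into the Keras cache (by default ~/.keras/datasets) and returns the local path, skipping the download when the file is already cached. The sketch below is only a minimal illustration of that call pattern; the URL and file name are placeholders, not taken from any of the examples on this page.

from tensorflow.keras.utils import get_file

# Hypothetical URL and file name, used only for illustration.
DATA_URL = 'https://example.com/data/sample_weights.h5'

# Download the file once and cache it (under ~/.keras/datasets by default);
# later calls with the same fname simply return the cached path.
weights_path = get_file('sample_weights.h5', origin=DATA_URL)

print('Cached at:', weights_path)

The examples below follow the same basic pattern, varying the cache location (cache_dir / cache_subdir) and optionally extracting archives (extract=True).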
Example #1
Source File: gender_detection.py From cvlib with MIT License
def __init__(self):
    proto_url = 'https://download.cvlib.net/config/gender_detection/gender_deploy.prototxt'
    model_url = 'https://github.com/arunponnusamy/cvlib-files/releases/download/v0.1/gender_net.caffemodel'
    save_dir = os.path.expanduser('~') + os.path.sep + '.cvlib' + os.path.sep + 'pre-trained'

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # download the prototxt and caffemodel into the cache directory
    self.proto = get_file('gender_deploy.prototxt', proto_url, cache_subdir=save_dir)
    self.model = get_file('gender_net.caffemodel', model_url, cache_subdir=save_dir)

    self.labels = ['male', 'female']
    self.mean = (78.4263377603, 87.7689143744, 114.895847746)

    print('[INFO] Initializing gender detection model ..')

    # load the downloaded Caffe model through OpenCV's DNN module
    self.net = cv2.dnn.readNetFromCaffe(self.proto, self.model)
Example #2
Source File: qm9.py From spektral with MIT License
def _download_data():
    # download and extract the QM9 archive into DATA_PATH
    _ = get_file(
        'qm9.tar.gz', DATASET_URL,
        extract=True, cache_dir=DATA_PATH, cache_subdir=DATA_PATH
    )

    # rename the extracted GDB9 files to the expected names and drop the archive
    os.rename(DATA_PATH + 'gdb9.sdf', DATA_PATH + 'qm9.sdf')
    os.rename(DATA_PATH + 'gdb9.sdf.csv', DATA_PATH + 'qm9.sdf.csv')
    os.remove(DATA_PATH + 'qm9.tar.gz')
Example #3
Source File: __init__.py From platypush with MIT License
def _get_data(cls, data: Union[str, np.ndarray, Iterable, Dict[str, Union[Iterable, np.ndarray]]], model: Model) \
        -> Union[np.ndarray, Iterable, Dict[str, Union[Iterable, np.ndarray]]]:
    if not isinstance(data, str):
        return data

    # remote URLs are downloaded to a timestamped file in the Keras cache
    if data.startswith('http://') or data.startswith('https://'):
        filename = '{timestamp}_{filename}'.format(
            timestamp=datetime.now().timestamp(), filename=data.split('/')[-1])
        data_file = utils.get_file(filename, data)
    else:
        data_file = os.path.abspath(os.path.expanduser(data))

    extensions = [ext for ext in cls._supported_data_file_extensions
                  if data_file.endswith('.' + ext)]

    if os.path.isfile(data_file):
        assert extensions, 'Unsupported type for file {}. Supported extensions: {}'.format(
            data_file, cls._supported_data_file_extensions
        )

        # dispatch on the file extension
        extension = extensions.pop()
        if extension in cls._csv_extensions:
            return cls._get_csv_data(data_file)
        if extension == 'npy':
            return cls._get_numpy_data(data_file)
        if extension == 'npz':
            return cls._get_numpy_compressed_data(data_file)
        if extension in cls._image_extensions:
            return cls._get_image(data_file, model)

        raise AssertionError('Unsupported file type: {}'.format(data_file))
    elif os.path.isdir(data_file):
        return cls._get_dir(data_file, model)
Example #4
Source File: pbt_memnn_example.py From ray with Apache License 2.0
def read_data():
    # Get the file
    try:
        path = get_file(
            "babi-tasks-v1-2.tar.gz",
            origin="https://s3.amazonaws.com/text-datasets/"
            "babi_tasks_1-20_v1-2.tar.gz")
    except Exception:
        print(
            "Error downloading dataset, please download it manually:\n"
            "$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2"  # noqa: E501
            ".tar.gz\n"
            "$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz"  # noqa: E501
        )
        raise

    # Choose challenge
    challenges = {
        # QA1 with 10,000 samples
        "single_supporting_fact_10k": "tasks_1-20_v1-2/en-10k/qa1_"
        "single-supporting-fact_{}.txt",
        # QA2 with 10,000 samples
        "two_supporting_facts_10k": "tasks_1-20_v1-2/en-10k/qa2_"
        "two-supporting-facts_{}.txt",
    }
    challenge_type = "single_supporting_fact_10k"
    challenge = challenges[challenge_type]

    with tarfile.open(path) as tar:
        train_stories = get_stories(tar.extractfile(challenge.format("train")))
        test_stories = get_stories(tar.extractfile(challenge.format("test")))
    return train_stories, test_stories