Python numpy.savez_compressed() Examples
The following are 29 code examples of numpy.savez_compressed(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
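Before the project examples, here is a minimal round-trip sketch: numpy.savez_compressed() writes one or more arrays into a single compressed .npz archive under the keyword names you pass, and numpy.load() reads them back by those names. The file name and array names below are illustrative only, not taken from any of the projects that follow.

import numpy as np

# Save two arrays into one compressed .npz archive;
# the keyword names ('a', 'b') become the lookup keys.
a = np.arange(10)
b = np.random.rand(3, 3)
np.savez_compressed('example.npz', a=a, b=b)

# np.load returns an NpzFile; member arrays are decompressed on access.
with np.load('example.npz') as data:
    print(data['a'])        # [0 1 2 3 4 5 6 7 8 9]
    print(data['b'].shape)  # (3, 3)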
Example #1
Source File: fetch_data_generation.py From ICML2019-TREX with MIT License | 6 votes |
def main():
    # `actions`, `observations` and `infos` are module-level lists that
    # goToGoal() appends to; they are defined elsewhere in the script.
    env = gym.make('FetchPickAndPlace-v0')
    numItr = 100
    initStateSpace = "random"
    env.reset()
    print("Reset!")
    while len(actions) < numItr:
        obs = env.reset()
        print("ITERATION NUMBER ", len(actions))
        goToGoal(env, obs)

    fileName = "data_fetch"
    fileName += "_" + initStateSpace
    fileName += "_" + str(numItr)
    fileName += ".npz"

    np.savez_compressed(fileName, acs=actions, obs=observations, info=infos)  # save the file
Example #2
Source File: section_track.py From ocelot with GNU General Public License v3.0 | 6 votes |
def save_twiss_file(self, twiss_list):
    if self.tws_file is None:
        tws_file_name = self.output_beam_file.replace("particles", "tws")
    else:
        tws_file_name = self.tws_file
    self.folder_check_create(tws_file_name)
    bx = np.array([tw.beta_x for tw in twiss_list])
    by = np.array([tw.beta_y for tw in twiss_list])
    ax = np.array([tw.alpha_x for tw in twiss_list])
    ay = np.array([tw.alpha_y for tw in twiss_list])
    s = np.array([tw.s for tw in twiss_list])
    E = np.array([tw.E for tw in twiss_list])
    emit_x = np.array([tw.emit_x for tw in twiss_list])
    emit_y = np.array([tw.emit_y for tw in twiss_list])

    np.savez_compressed(tws_file_name, beta_x=bx, beta_y=by, alpha_x=ax, alpha_y=ay,
                        E=E, s=s, emit_x=emit_x, emit_y=emit_y)
Example #3
Source File: level_iterator.py From safelife with Apache License 2.0 | 6 votes |
def combine_levels(directory):
    """Merge all levels in a single directory into one archive."""
    files = sorted(glob.glob(os.path.join(directory, '*.npz')))
    all_data = []
    max_name_len = 0
    for file in files:
        with np.load(file) as data:
            name = os.path.split(file)[1]
            max_name_len = max(max_name_len, len(name))
            all_data.append(list(data.items()) + [('name', name)])
    dtype = []
    for key, val in all_data[0][:-1]:
        dtype.append((key, val.dtype, val.shape))
    dtype.append(('name', str, max_name_len))
    combo_data = np.array([
        tuple([val for key, val in data]) for data in all_data
    ], dtype=dtype)
    np.savez_compressed(directory + '.npz', levels=combo_data)
Example #4
Source File: convert_traj.py From imitation with MIT License | 6 votes |
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("src_path", type=str)
    parser.add_argument("dst_path", type=str)
    args = parser.parse_args()

    src_path = Path(args.src_path)
    dst_path = Path(args.dst_path)
    assert src_path.is_file()

    src_trajs = types.load(str(src_path))
    dst_trajs = convert_trajs_to_sb(src_trajs)
    os.makedirs(dst_path.parent, exist_ok=True)
    with open(dst_path, "wb") as f:
        np.savez_compressed(f, **dst_trajs)

    print(f"Dumped rollouts to {dst_path}")
Example #5
Source File: data_utils.py From PyTorch-Elmo-BiLSTMCRF with MIT License | 6 votes |
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
    """Saves glove vectors in numpy array

    Args:
        vocab: dictionary vocab[word] = index
        glove_filename: a path to a glove file
        trimmed_filename: a path where to store a matrix in npy
        dim: (int) dimension of embeddings

    """
    embeddings = np.zeros([len(vocab), dim])
    with open(glove_filename, encoding="utf8") as f:
        for line in f:
            line = line.strip().split(' ')
            word = line[0]
            embedding = [float(x) for x in line[1:]]
            if word in vocab:
                word_idx = vocab[word]
                embeddings[word_idx] = np.asarray(embedding)

    np.savez_compressed(trimmed_filename, embeddings=embeddings)
Example #6
Source File: codecs.py From petastorm with Apache License 2.0 | 6 votes |
def encode(self, unischema_field, value):
    expected_dtype = unischema_field.numpy_dtype
    if isinstance(value, np.ndarray):
        if expected_dtype != value.dtype.type:
            raise ValueError('Unexpected type of {} feature. '
                             'Expected {}. Got {}'.format(unischema_field.name, expected_dtype, value.dtype))

        expected_shape = unischema_field.shape
        if not _is_compliant_shape(value.shape, expected_shape):
            raise ValueError('Unexpected dimensions of {} feature. '
                             'Expected {}. Got {}'.format(unischema_field.name, expected_shape, value.shape))
    else:
        raise ValueError('Unexpected type of {} feature. '
                         'Expected ndarray of {}. Got {}'.format(unischema_field.name, expected_dtype, type(value)))

    memfile = BytesIO()
    np.savez_compressed(memfile, arr=value)
    return bytearray(memfile.getvalue())
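A note on Example #6: because savez_compressed() accepts any file-like object, the codec can serialize into an in-memory BytesIO buffer rather than a file on disk. Petastorm's codec class defines its own decode method; the standalone sketch below only illustrates the numpy side of reversing the encode step, assuming the bytes were produced exactly as above.

from io import BytesIO

import numpy as np

def decode_ndarray(encoded_bytes):
    # Wrap the raw bytes in a file-like object and let np.load
    # unpack the single array stored under the 'arr' key by encode().
    memfile = BytesIO(bytes(encoded_bytes))
    with np.load(memfile) as data:
        return data['arr']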
Example #7
Source File: model_averaging.py From EEND with MIT License | 6 votes |
def average_model_chainer(ifiles, ofile):
    omodel = {}
    # get keys from the first file
    model = np.load(ifiles[0])
    for x in model:
        if 'model' in x:
            print(x)
    keys = [x.split('main/')[1] for x in model if 'model' in x]
    print(keys)
    for path in ifiles:
        model = np.load(path)
        for key in keys:
            val = model['updater/model:main/{}'.format(key)]
            if key not in omodel:
                omodel[key] = val
            else:
                omodel[key] += val
    for key in keys:
        omodel[key] /= len(ifiles)
    np.savez_compressed(ofile, **omodel)
Example #8
Source File: ner_data_utils.py From robotreviewer with GNU General Public License v3.0 | 6 votes |
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
    """Saves glove vectors in numpy array

    Args:
        vocab: dictionary vocab[word] = index
        glove_filename: a path to a glove file
        trimmed_filename: a path where to store a matrix in npy
        dim: (int) dimension of embeddings

    """
    embeddings = np.zeros([len(vocab), dim])
    with open(glove_filename) as f:
        for line in f:
            line = line.strip().split(' ')
            word = line[0]
            embedding = [float(x) for x in line[1:]]
            if word in vocab:
                word_idx = vocab[word]
                embeddings[word_idx] = np.asarray(embedding)

    np.savez_compressed(trimmed_filename, embeddings=embeddings)
Example #9
Source File: mesh_prepare.py From MeshCNN with MIT License | 6 votes |
def fill_mesh(mesh2fill, file: str, opt):
    load_path = get_mesh_path(file, opt.num_aug)
    if os.path.exists(load_path):
        mesh_data = np.load(load_path, encoding='latin1', allow_pickle=True)
    else:
        mesh_data = from_scratch(file, opt)
        np.savez_compressed(load_path, gemm_edges=mesh_data.gemm_edges, vs=mesh_data.vs,
                            edges=mesh_data.edges, edges_count=mesh_data.edges_count,
                            ve=mesh_data.ve, v_mask=mesh_data.v_mask,
                            filename=mesh_data.filename, sides=mesh_data.sides,
                            edge_lengths=mesh_data.edge_lengths, edge_areas=mesh_data.edge_areas,
                            features=mesh_data.features)
    mesh2fill.vs = mesh_data['vs']
    mesh2fill.edges = mesh_data['edges']
    mesh2fill.gemm_edges = mesh_data['gemm_edges']
    mesh2fill.edges_count = int(mesh_data['edges_count'])
    mesh2fill.ve = mesh_data['ve']
    mesh2fill.v_mask = mesh_data['v_mask']
    mesh2fill.filename = str(mesh_data['filename'])
    mesh2fill.edge_lengths = mesh_data['edge_lengths']
    mesh2fill.edge_areas = mesh_data['edge_areas']
    mesh2fill.features = mesh_data['features']
    mesh2fill.sides = mesh_data['sides']
Example #10
Source File: test_npz.py From chainer with MIT License | 6 votes |
def setUp(self):
    self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

    fd, path = tempfile.mkstemp()
    os.close(fd)
    self.temp_file_path = path
    with open(path, 'wb') as f:
        savez = numpy.savez_compressed if self.compress else numpy.savez
        savez(
            f, **{'x/': None, 'y': self.data, 'z': numpy.asarray(10),
                  'zf32': numpy.array(-2**60, dtype=numpy.float32),
                  'zi64': numpy.array(-2**60, dtype=numpy.int64),
                  'w': None})

    try:
        self.npzfile = numpy.load(path, allow_pickle=True)
    except TypeError:
        self.npzfile = numpy.load(path)
    self.deserializer = npz.NpzDeserializer(self.npzfile)
Example #11
Source File: test_mmap.py From panns with GNU General Public License v2.0 | 6 votes |
def generate_large_matrix():
    rows, cols = 1000000, 500
    print('Test serializing a %i x %i matrix ...' % (rows, cols))
    t = time.time()
    vecs = numpy.random.normal(0, 1, (rows, cols))
    print('Matrix constructed, spent %.2f s' % (time.time() - t))

    f1 = open('test_data1', 'wb')
    t = time.time()
    print('saving as numpy npz format ...')
    numpy.savez_compressed(f1, vecs)
    print('save done, spent %.2f s' % (time.time() - t))
    f1.close()

    f2 = open('test_data2', 'wb')
    t = time.time()
    print('saving as self-defined format ...')
    for v in vecs:
        f2.write(pickle.dumps(v, -1))
    f2.close()
    print('save done, spent %.2f s' % (time.time() - t))
Example #12
Source File: _mnist_helper.py From chainer with MIT License | 6 votes |
def make_npz(path, urls):
    x_url, y_url = urls
    x_path = download.cached_download(x_url)
    y_path = download.cached_download(y_url)

    with gzip.open(x_path, 'rb') as fx, gzip.open(y_path, 'rb') as fy:
        fx.read(4)
        fy.read(4)
        N, = struct.unpack('>i', fx.read(4))
        if N != struct.unpack('>i', fy.read(4))[0]:
            raise RuntimeError('wrong pair of MNIST images and labels')
        fx.read(8)

        x = numpy.empty((N, 784), dtype=numpy.uint8)
        y = numpy.empty(N, dtype=numpy.uint8)

        for i in six.moves.range(N):
            y[i] = ord(fy.read(1))
            for j in six.moves.range(784):
                x[i, j] = ord(fx.read(1))

    numpy.savez_compressed(path, x=x, y=y)
    return {'x': x, 'y': y}
Example #13
Source File: storage.py From ffn with Apache License 2.0 | 6 votes |
def save_subvolume(labels, origins, output_path, **misc_items):
    """Saves an FFN subvolume.

    Args:
        labels: 3d zyx number array with the segment labels
        origins: dictionary mapping segment ID to origin information
        output_path: path at which to save the segmentation in the form
            of a .npz file
        **misc_items: (optional) additional values to save in the output file
    """
    seg = segmentation.reduce_id_bits(labels)
    gfile.MakeDirs(os.path.dirname(output_path))
    with atomic_file(output_path) as fd:
        np.savez_compressed(fd, segmentation=seg, origins=origins, **misc_items)
Example #14
Source File: inference.py From ffn with Apache License 2.0 | 6 votes |
def save_checkpoint(self, path):
    """Saves an inference checkpoint to `path`."""
    self.log_info('Saving inference checkpoint to %s.', path)
    with timer_counter(self.counters, 'save_checkpoint'):
        gfile.MakeDirs(os.path.dirname(path))
        with storage.atomic_file(path) as fd:
            seed_policy_state = None
            if self.seed_policy is not None:
                seed_policy_state = self.seed_policy.get_state()

            np.savez_compressed(fd,
                                movement_policy=self.movement_policy.get_state(),
                                segmentation=self.segmentation,
                                seg_qprob=self.seg_prob,
                                seed=self.seed,
                                origins=self.origins,
                                overlaps=self.overlaps,
                                history=np.array(self.history),
                                history_deleted=np.array(self.history_deleted),
                                seed_policy_state=seed_policy_state,
                                counters=self.counters.dumps())
    self.log_info('Inference checkpoint saved.')
Example #15
Source File: cache.py From yatsm with MIT License | 6 votes |
def write_cache_file(cache_filename, Y, image_IDs):
    """ Writes data to a cache file using np.savez_compressed

    Args:
        cache_filename (str): cache filename
        Y (np.ndarray): data to write to cache file
        image_IDs (iterable): list of image IDs corresponding to data in
            cache file. If not specified, function will not check for
            correspondence

    """
    np.savez_compressed(cache_filename, **{
        'Y': Y, _image_ID_str: image_IDs
    })

# Cache file updating
Example #16
Source File: data_io.py From kits19.MIScnn with GNU General Public License v3.0 | 6 votes |
def backup_batches(batches_vol, batches_seg, path, case_id):
    # Create model directory if not existent
    if not os.path.exists(path):
        os.mkdir(path)
    # Create subdirectory for the case if not existent
    case_dir = os.path.join(path, "tmp.case_" + str(case_id).zfill(5))
    if not os.path.exists(case_dir):
        os.mkdir(case_dir)
    # Backup volume batches
    if batches_vol is not None:
        for i, batch in enumerate(batches_vol):
            out_path = os.path.join(case_dir, "batch_vol." + str(i))
            np.savez(out_path, data=batch)
    # Backup segmentation batches
    if batches_seg is not None:
        for i, batch in enumerate(batches_seg):
            out_path = os.path.join(case_dir, "batch_seg." + str(i))
            np.savez_compressed(out_path, data=batch)

# Load a MRI object from a npz for fast access
Example #17
Source File: sample_data.py From PJ_NLP with Apache License 2.0 | 6 votes |
def extract_data_and_split(train_set, data_label_id, label2id):
    """1. Process the data and labels: extract title_word and content_word,
    pair each label with its data, and save the result to a file.
    2. Split the dataset."""
    datas = []
    for line in train_set:
        data_id, _, title_word, _, content_word = line.replace('\n', '').split('\t')
        labels = ','.join([str(label) for label in data_label_id[data_id]])
        info = '{}\t{}\t{}'.format(labels, title_word, content_word)
        datas.append(info)
    train_data, val_data = train_test_split(datas, test_size=0.05, random_state=2019)
    print('label num: {} - data num:{}'.format(len(label2id), len(datas)))
    print("train num: {} - dev num: {}".format(len(train_data), len(val_data)))

    np.savez_compressed(conf.label2id_path, data_label_id=data_label_id, label2id=label2id)
    with open(conf.train_file, 'w', encoding='utf-8') as fw:
        fw.write('\n'.join(train_data))
    with open(conf.dev_file, 'w', encoding='utf-8') as fw:
        fw.write('\n'.join(val_data))
Example #18
Source File: process_data.py From Adversarial_Video_Generation with MIT License | 6 votes |
def process_training_data(num_clips):
    """
    Processes random training clips from the full training data. Saves to
    TRAIN_DIR_CLIPS by default.

    @param num_clips: The number of clips to process. Default = 5000000
                      (set in __main__).

    @warning: This can take a couple of hours to complete with large
              numbers of clips.
    """
    num_prev_clips = len(glob(c.TRAIN_DIR_CLIPS + '*'))

    for clip_num in range(num_prev_clips, num_clips + num_prev_clips):
        clip = process_clip()

        np.savez_compressed(c.TRAIN_DIR_CLIPS + str(clip_num), clip)

        if (clip_num + 1) % 100 == 0:
            print('Processed %d clips' % (clip_num + 1))
Example #19
Source File: cal_fid_stat.py From AutoGAN with MIT License | 5 votes |
def main():
    args = parse_args()

    ########
    # PATHS
    ########
    data_path = args.data_path
    output_path = args.output_file
    # if you have downloaded and extracted
    # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
    # set this path to the directory where the extracted files are, otherwise
    # just set it to None and the script will later download the files for you
    inception_path = None
    print("check for inception model..", end=" ", flush=True)
    inception_path = fid.check_or_download_inception(inception_path)  # download inception if necessary
    print("ok")

    # loads all images into memory (this might require a lot of RAM!)
    print("load images..", end=" ", flush=True)
    image_list = glob.glob(os.path.join(data_path, '*.jpg'))
    images = np.array([imread(str(fn)).astype(np.float32) for fn in image_list])
    print("%d images found and loaded" % len(images))

    print("create inception graph..", end=" ", flush=True)
    fid.create_inception_graph(inception_path)  # load the graph into the current TF graph
    print("ok")

    print("calculate FID stats..", end=" ", flush=True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        mu, sigma = fid.calculate_activation_statistics(images, sess, batch_size=100)
        np.savez_compressed(output_path, mu=mu, sigma=sigma)
    print("finished")
Example #20
Source File: npz.py From chainer with MIT License | 5 votes |
def save_npz(file, obj, compression=True):
    """Saves an object to the file in NPZ format.

    This is a short-cut function to save only one object into an NPZ file.

    Args:
        file (str or file-like): Target file to write to.
        obj: Object to be serialized. It must support serialization protocol.
            If it is a dictionary object, the serialization will be skipped.
        compression (bool): If ``True``, compression in the resulting zip file
            is enabled.

    .. seealso::
        :func:`chainer.serializers.load_npz`

    """
    if isinstance(file, six.string_types):
        with open(file, 'wb') as f:
            save_npz(f, obj, compression)
        return

    if isinstance(obj, dict):
        target = obj
    else:
        s = DictionarySerializer()
        s.save(obj)
        target = s.target

    if compression:
        numpy.savez_compressed(file, **target)
    else:
        numpy.savez(file, **target)
Example #21
Source File: ptb.py From chainer with MIT License | 5 votes |
def _retrieve_ptb_words(name, url):
    def creator(path):
        vocab = _retrieve_word_vocabulary()
        words = _load_words(url)
        x = numpy.empty(len(words), dtype=numpy.int32)
        for i, word in enumerate(words):
            x[i] = vocab[word]

        numpy.savez_compressed(path, x=x)
        return {'x': x}

    root = download.get_dataset_directory('pfnet/chainer/ptb')
    path = os.path.join(root, name)
    loaded = download.cache_or_load_file(path, creator, numpy.load)
    return loaded['x']
Example #22
Source File: EpisodeData.py From Grid2Op with Mozilla Public License 2.0 | 5 votes |
def save(self, path):
    np.savez_compressed(path, data=self.collection)  # do not change keyword arguments
Example #23
Source File: prepro_feats.py From AAT with MIT License | 5 votes |
def main(params):
    net = getattr(resnet, params['model'])()
    net.load_state_dict(torch.load(os.path.join(params['model_root'], params['model'] + '.pth')))
    my_resnet = myResnet(net)
    my_resnet.cuda()
    my_resnet.eval()

    imgs = json.load(open(params['input_json'], 'r'))
    imgs = imgs['images']
    N = len(imgs)

    seed(123)  # make reproducible

    dir_fc = params['output_dir'] + '_fc'
    dir_att = params['output_dir'] + '_att'
    if not os.path.isdir(dir_fc):
        os.mkdir(dir_fc)
    if not os.path.isdir(dir_att):
        os.mkdir(dir_att)

    for i, img in enumerate(imgs):
        # load the image
        I = skimage.io.imread(os.path.join(params['images_root'], img['filepath'], img['filename']))
        # handle grayscale input images
        if len(I.shape) == 2:
            I = I[:, :, np.newaxis]
            I = np.concatenate((I, I, I), axis=2)

        I = I.astype('float32') / 255.0
        I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()
        I = preprocess(I)
        with torch.no_grad():
            tmp_fc, tmp_att = my_resnet(I, params['att_size'])
        # write the fc feature to .npy and the att feature to compressed .npz
        np.save(os.path.join(dir_fc, str(img['cocoid'])), tmp_fc.data.cpu().float().numpy())
        np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])), feat=tmp_att.data.cpu().float().numpy())

        if i % 1000 == 0:
            print('processing %d/%d (%.2f%% done)' % (i, N, i * 100.0 / N))
    print('wrote ', params['output_dir'])
Example #24
Source File: datasets.py From EvolutionaryGAN-pytorch with MIT License | 5 votes |
def __init__(self, root, transform=None, target_transform=None,
             loader=default_loader, load_in_mem=False,
             index_filename='imagenet_imgs.npz', **kwargs):
    classes, class_to_idx = find_classes(root)
    # Load pre-computed image directory walk
    if os.path.exists(index_filename):
        print('Loading pre-saved Index file %s...' % index_filename)
        imgs = np.load(index_filename)['imgs']
    # If first time, walk the folder directory and save the
    # results to a pre-computed file.
    else:
        print('Generating Index file %s...' % index_filename)
        imgs = make_dataset(root, class_to_idx)
        np.savez_compressed(index_filename, **{'imgs': imgs})
    if len(imgs) == 0:
        raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                           "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))

    self.root = root
    self.imgs = imgs
    self.classes = classes
    self.class_to_idx = class_to_idx
    self.transform = transform
    self.target_transform = target_transform
    self.loader = loader
    self.load_in_mem = load_in_mem

    if self.load_in_mem:
        print('Loading all images into memory...')
        self.data, self.labels = [], []
        for index in tqdm(range(len(self.imgs))):
            path, target = self.transform(imgs[index][0]), imgs[index][1]
            self.data.append(self.loader(path))
            self.labels.append(target)
Example #25
Source File: test_format.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_compressed_roundtrip():
    arr = np.random.rand(200, 200)
    npz_file = os.path.join(tempdir, 'compressed.npz')
    np.savez_compressed(npz_file, arr=arr)
    arr1 = np.load(npz_file)['arr']
    assert_array_equal(arr, arr1)
Example #26
Source File: test_io.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_savez_compressed_load(self):
    # Test that pathlib.Path instances can be used with savez_compressed.
    with temppath(suffix='.npz') as path:
        path = Path(path)
        np.savez_compressed(path, lab='place holder')
        data = np.load(path)
        assert_array_equal(data['lab'], 'place holder')
        data.close()
Example #27
Source File: generate_goals.py From real_robots with MIT License | 5 votes |
def main(seed=None, n1=0, n2=0, n3=0, n4=0, n5=0, n6=0, n7=0):
    """
    Generates the specified number of goals and saves them in a file.\n
    The file is called goals-s{}-{}-{}-{}-{}-{}-{}-{}.npy.npz where
    enclosed brackets are replaced with the supplied options
    (seed, n1...n7) or 0.
    """
    np.random.seed(seed)
    allgoals = []
    env = gym.make('REALRobot-v0')
    env.reset()

    pos = env.robot.object_poses['mustard'][:]
    pos[2] = 0.41
    orient = env._p.getQuaternionFromEuler(pos[3:])
    env.robot.object_bodies['mustard'].reset_pose(pos[:3], orient)

    global basePosition
    _, basePosition, _, _ = runEnv(env)

    # In these for loops, we could add some progress bar...
    for _ in range(n1):
        allgoals += [generateGoal2D(env, 1)]
    for _ in range(n2):
        allgoals += [generateGoal2D(env, 2)]
    for _ in range(n3):
        allgoals += [generateGoal2D(env, 3)]
    for _ in range(n4):
        allgoals += [generateGoal25D(env, 1)]
    for _ in range(n5):
        allgoals += [generateGoal25D(env, 2)]
    for _ in range(n6):
        allgoals += [generateGoal25D(env, 3)]
    for _ in range(n7):
        allgoals += [generateGoal3D(env)]

    np.savez_compressed('goals-s{}-{}-{}-{}-{}-{}-{}-{}.npy'
                        .format(seed, n1, n2, n3, n4, n5, n6, n7),
                        allgoals)
    # checkRepeatability(env, allgoals)
Example #28
Source File: env.py From real_robots with MIT License | 5 votes |
def __init__(self, render=False):
    self.robot = Kuka()
    MJCFBaseBulletEnv.__init__(self, self.robot, render)
    self._cam_dist = 1.2
    self._cam_yaw = 30
    self._cam_roll = 0
    self._cam_pitch = -30
    self._render_width = 320
    self._render_height = 240
    self._cam_pos = [0, 0, .4]
    self.setCamera()
    self.eyes = {}

    self.reward_func = DefaultRewardFunc

    self.robot.used_objects = ["table", "tomato", "mustard", "cube"]

    self.set_eye("eye")

    self.goal = Goal(retina=self.observation_space.spaces[
        self.robot.ObsSpaces.GOAL].sample()*0)

    # Set default goals dataset path
    #
    # The goals dataset is basically a list of real_robots.envs.env.Goal
    # objects which are stored using:
    #
    #     np.savez_compressed(
    #         "path.npy.npz",
    #         list_of_goals)
    #
    self.goals_dataset_path = os.path.join(
        real_robots.getPackageDataPath(), "goals_dataset.npy.npz")

    self.goals = None
    self.goal_idx = -1
Example #29
Source File: utils.py From imgclsmob with MIT License | 5 votes |
def save_model_params(sess, file_path):
    # assert file_path.endswith('.npz')
    param_dict = {v.name: v.eval(sess) for v in tf.global_variables()}
    np.savez_compressed(file_path, **param_dict)