Python numpy.squeeze() Examples
The following are 26 code examples of numpy.squeeze(), drawn from open-source projects. The source file, project, and license are noted above each example.
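Before the examples, a quick refresher on the function itself: numpy.squeeze(a, axis=None) returns the input with axes of length one removed; when axis is given, only that axis is dropped, and NumPy raises a ValueError if the selected axis is not of length one. A minimal sketch:

import numpy as np

a = np.zeros((1, 3, 1))
print(np.squeeze(a).shape)           # (3,)    -- every length-1 axis removed
print(np.squeeze(a, axis=0).shape)   # (3, 1)  -- only the leading axis removed
print(np.squeeze(a, axis=2).shape)   # (1, 3)  -- only the trailing axis removed
# np.squeeze(a, axis=1) raises ValueError: axis 1 has length 3, not 1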
Example #1
Source File: common.py From cat-bbs with MIT License
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 \
        or (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        # imresize returns (H, W, 1); squeeze back down to (H, W)
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)  # drop the alpha channel
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1 - alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix
Example #2
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def predict(self, batch_inputs, batch_ruitu, batch_ids):
    pred_result_list = []
    for i in range(10):
        result = self.model.predict(x=[batch_inputs[:, :, i, :],
                                       batch_ruitu[:, :, i, :],
                                       batch_ids[:, :, i]])
        # drop the singleton batch dimension from the per-station prediction
        result = np.squeeze(result, axis=0)
        pred_result_list.append(result)

    pred_result = np.stack(pred_result_list, axis=0)
    print('Predict shape (10, 37, 3) means (stationID, timestep, features). '
          'Features include: t2m, rh2m and w10m')
    self.pred_result = pred_result
    return pred_result
Example #3
Source File: metrics.py From fine-lm with MIT License
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
    """Recall of set predictions.

    Args:
      predictions: A Tensor of scores of shape [batch, nlabels].
      labels: A Tensor of int32s giving true set elements,
        of shape [batch, seq_length].
      weights_fn: A function to weight the elements.

    Returns:
      hits: A Tensor of shape [batch, nlabels].
      weights: A Tensor of shape [batch, nlabels].
    """
    with tf.variable_scope("set_recall", values=[predictions, labels]):
        labels = tf.squeeze(labels, [2, 3])
        weights = weights_fn(labels)
        labels = tf.one_hot(labels, predictions.shape[-1])
        labels = tf.reduce_max(labels, axis=1)
        labels = tf.cast(labels, tf.bool)
        return tf.to_float(tf.equal(labels, predictions)), weights
Example #4
Source File: seq2seq_attention_model.py From DOTA_models with Apache License 2.0
def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
    """Return the topK results and new decoder states."""
    feed = {
        self._enc_top_states: enc_top_states,
        self._dec_in_state: np.squeeze(np.array(dec_init_states)),
        self._abstracts: np.transpose(np.array([latest_tokens])),
        self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}
    results = sess.run(
        [self._topk_ids, self._topk_log_probs, self._dec_out_state],
        feed_dict=feed)
    ids, probs, states = results[0], results[1], results[2]
    new_states = [s for s in states]
    return ids, probs, new_states
Example #5
Source File: gradcam.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def visualize(net, preprocessed_img, orig_img, conv_layer_name):
    # Returns grad-cam heatmap, guided grad-cam, guided grad-cam saliency
    imggrad = get_image_grad(net, preprocessed_img)
    conv_out, conv_out_grad = get_conv_out_grad(
        net, preprocessed_img, conv_layer_name=conv_layer_name)
    cam = get_cam(imggrad, conv_out)
    ggcam = get_guided_grad_cam(cam, imggrad)
    img_ggcam = grad_to_image(ggcam)
    img_heatmap = get_img_heatmap(orig_img, cam)
    ggcam_gray = to_grayscale(ggcam)
    img_ggcam_gray = np.squeeze(grad_to_image(ggcam_gray))
    return img_heatmap, img_ggcam, img_ggcam_gray
Example #6
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def apply_affine(aff, coords):
    '''
    apply_affine(affine, coords) yields the result of applying the given affine
      transformation to the given coordinate or coordinates.

    This function expects coords to be a (dims X n) matrix but if the first
    dimension is neither 2 nor 3, coords.T is used; i.e.:
      apply_affine(affine3x3, coords2xN) ==> newcoords2xN
      apply_affine(affine4x4, coords3xN) ==> newcoords3xN
      apply_affine(affine3x3, coordsNx2) ==> newcoordsNx2 (for N != 2)
      apply_affine(affine4x4, coordsNx3) ==> newcoordsNx3 (for N != 3)
    '''
    if aff is None: return coords
    (coords, tr) = (np.asanyarray(coords), False)
    if len(coords.shape) == 1:
        # promote a bare vector to a column matrix, transform, then squeeze back
        # (the original had the arguments to the recursive call swapped)
        return np.squeeze(apply_affine(aff, np.reshape(coords, (-1, 1))))
    elif len(coords.shape) > 2:
        raise ValueError('cannot apply affine to ND-array for N > 2')
    if   len(coords) == 2: aff = to_affine(aff, 2)
    elif len(coords) == 3: aff = to_affine(aff, 3)
    else: (coords, aff, tr) = (coords.T, to_affine(aff, coords.shape[1]), True)
    r = np.dot(aff, np.vstack([coords, np.ones([1, coords.shape[1]])]))[:-1]
    return r.T if tr else r
Example #7
Source File: dataloader.py From models with MIT License
def __getitem__(self, idx):
    if self.fasta_extractor is None:
        self.fasta_extractor = FastaExtractor(self.fasta_file)

    interval = self.bt[idx]

    if interval.stop - interval.start != self.SEQ_WIDTH:
        raise ValueError("Expected the interval to be {0} wide. "
                         "Received stop - start = {1}".format(
                             self.SEQ_WIDTH, interval.stop - interval.start))

    # Run the fasta extractor on a single-interval batch and
    # squeeze away the leading batch axis it adds
    seq = np.squeeze(self.fasta_extractor([interval]), axis=0)
    return {
        "inputs": {"dna": seq},
        "metadata": {
            "ranges": GenomicRanges.from_interval(interval)
        }
    }
Example #8
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def predict(self, batch_inputs, batch_ruitu):
    assert batch_ruitu.shape[0] == batch_inputs.shape[0], 'Shape Error'
    assert batch_inputs.shape[1] == 28 and batch_inputs.shape[2] == 10 \
        and batch_inputs.shape[3] == 9, 'Error! Obs input shape must be (None, 28, 10, 9)'
    assert batch_ruitu.shape[1] == 37 and batch_ruitu.shape[2] == 10 \
        and batch_ruitu.shape[3] == 29, 'Error! Ruitu input shape must be (None, 37, 10, 29)'

    pred_result_list = []
    for i in range(10):
        result = self.model.predict(x=[batch_inputs[:, :, i, :],
                                       batch_ruitu[:, :, i, :]])
        # drop the singleton batch dimension from the per-station prediction
        result = np.squeeze(result, axis=0)
        pred_result_list.append(result)

    pred_result = np.stack(pred_result_list, axis=0)
    print('Predict shape (10, 37, 3) means (stationID, timestep, features). '
          'Features include: t2m, rh2m and w10m')
    self.pred_result = pred_result
    return pred_result
Example #9
Source File: utils.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            # the attack returns a batch of size 1; squeeze away the batch axis
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack
Example #10
Source File: keplerSTM_indprop.py From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def __init__(self, x0, mu, epsmult=4.0, noc=False):
    # determine number of planets and validate input
    nplanets = x0.size / 6.
    if (nplanets - np.floor(nplanets) > 0):
        raise Exception('The length of x0 must be a multiple of 6.')

    if (mu.size != nplanets):
        raise Exception('The length of mu must be the length of x0 divided by 6')

    self.nplanets = int(nplanets)
    self.mu = np.squeeze(mu)
    # squeezing a length-1 array yields a 0-d array; restore a 1-element array
    if (self.mu.size == 1):
        self.mu = np.array(mu)

    self.epsmult = epsmult

    if not(noc) and ('EXOSIMS.util.KeplerSTM_C.CyKeplerSTM' in sys.modules):
        self.havec = True
        self.x0 = np.squeeze(x0)
    else:
        self.havec = False
        self.updateState(np.squeeze(x0))
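A detail worth noting in the example above: squeezing a length-1 array produces a 0-d array rather than a 1-element vector, which appears to be why the constructor restores mu with np.array(mu). A quick illustration (the constant is just sample data):

import numpy as np

mu = np.array([398600.4418])   # a single gravitational parameter
print(np.squeeze(mu).shape)    # () -- 0-d array; indexing it with [0] raises IndexError
print(np.array(mu).shape)      # (1,) -- a proper 1-element array again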
Example #11
Source File: mujoco_dset.py From lirpg with MIT License
def __init__(self, expert_path, train_fraction=0.7, traj_limitation=-1, randomize=True):
    traj_data = np.load(expert_path)
    if traj_limitation < 0:
        traj_limitation = len(traj_data['obs'])
    obs = traj_data['obs'][:traj_limitation]
    acs = traj_data['acs'][:traj_limitation]

    def flatten(x):
        # x.shape = (E,), or (E, L, D)
        _, size = x[0].shape
        episode_length = [len(i) for i in x]
        y = np.zeros((sum(episode_length), size))
        start_idx = 0
        for l, x_i in zip(episode_length, x):
            y[start_idx:(start_idx + l)] = x_i
            start_idx += l
        return y

    self.obs = np.array(flatten(obs))
    self.acs = np.array(flatten(acs))
    self.rets = traj_data['ep_rets'][:traj_limitation]
    self.avg_ret = sum(self.rets) / len(self.rets)
    self.std_ret = np.std(np.array(self.rets))
    if len(self.acs) > 2:
        self.acs = np.squeeze(self.acs)
    assert len(self.obs) == len(self.acs)
    self.num_traj = min(traj_limitation, len(traj_data['obs']))
    self.num_transition = len(self.obs)
    self.randomize = randomize
    self.dset = Dset(self.obs, self.acs, self.randomize)
    # for behavior cloning
    self.train_set = Dset(self.obs[:int(self.num_transition * train_fraction), :],
                          self.acs[:int(self.num_transition * train_fraction), :],
                          self.randomize)
    self.val_set = Dset(self.obs[int(self.num_transition * train_fraction):, :],
                        self.acs[int(self.num_transition * train_fraction):, :],
                        self.randomize)
    self.log_info()
Example #12
Source File: conftest.py From stft with MIT License
def signal(channels, length):
    return numpy.squeeze(numpy.random.random((length, channels)))
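The squeeze in this fixture is what collapses a mono signal to a 1-D array while leaving multichannel signals 2-D, so the same fixture exercises both input shapes:

import numpy
print(numpy.squeeze(numpy.random.random((1024, 1))).shape)  # (1024,)   mono -> 1-D
print(numpy.squeeze(numpy.random.random((1024, 2))).shape)  # (1024, 2) stereo unchanged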
Example #13
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def predict(self, batch_ruitu):
    pred_result_list = []
    for i in range(10):
        result = self.model.predict(x=[batch_ruitu[:, :, i, :]])
        # drop the singleton batch dimension from the per-station prediction
        result = np.squeeze(result, axis=0)
        pred_result_list.append(result)

    pred_result = np.stack(pred_result_list, axis=0)
    print('Predict shape (10, 37, 3) means (stationID, timestep, features). '
          'Features include: t2m, rh2m and w10m')
    self.pred_result = pred_result
    return pred_result
Example #14
Source File: test_things.py From stft with MIT License
def test_shape(length, framelength):
    a = numpy.squeeze(numpy.random.random((length, 1)))

    x = stft.spectrogram(a, framelength=framelength, halved=True)
    assert x.shape[0] == framelength / 2 + 1

    x_2 = stft.spectrogram(a, framelength=framelength, halved=False)
    assert x_2.shape[0] == framelength
Example #15
Source File: metrics.py From fine-lm with MIT License
def rounding_sequence_accuracy(predictions, labels,
                               weights_fn=common_layers.weights_nonzero):
    """Sequence accuracy for L1/L2 losses: round down the predictions to ints."""
    outputs = tf.squeeze(tf.to_int32(predictions), axis=-1)
    weights = weights_fn(labels)
    labels = tf.to_int32(labels)
    not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
Example #16
Source File: text_encoder.py From fine-lm with MIT License
def decode(self, ids, strip_extraneous=False):
    del strip_extraneous
    label_id = ids
    if isinstance(label_id, np.ndarray):
        label_id = np.squeeze(label_id).astype(np.int8).tolist()
    assert isinstance(label_id, list)
    assert len(label_id) == self.vocab_size
    return self._class_labels[label_id.index(1)]
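The squeeze-then-tolist pattern above turns a one-hot prediction with stray unit axes (e.g., shape (1, vocab_size)) into a flat Python list whose index of 1 is the class id; for instance:

import numpy as np

one_hot = np.array([[0, 0, 1, 0]])                       # shape (1, 4)
label_id = np.squeeze(one_hot).astype(np.int8).tolist()  # [0, 0, 1, 0]
print(label_id.index(1))                                 # 2 -- the decoded class index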
Example #17
Source File: seq2seq_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def predict(self, batch_inputs, batch_ruitu, batch_ids, batch_times):
    '''
    Input:

    Output:
    pred_result (mean value) : (None, 10, 37, 3). i.e., (sample_nums, stationID, timestep, features)
    pred_var_result (var value) : (None, 10, 37, 3)
    '''
    pred_result_list = []
    pred_var_list = []
    for i in range(10):
        result = self.model.predict(x=[batch_inputs[:, :, i, :],
                                       batch_ruitu[:, :, i, :],
                                       batch_ids[:, :, i],
                                       batch_times])
        var_result = result[:, :, 3:6]  # Variance
        result = result[:, :, 0:3]      # Mean
        #result = np.squeeze(result, axis=0)
        pred_result_list.append(result)
        #var_result = np.squeeze(var_result, axis=0)
        pred_var_list.append(var_result)

    pred_result = np.stack(pred_result_list, axis=1)
    pred_var_result = np.stack(pred_var_list, axis=1)

    print('Predictive shape (None, 10, 37, 3) means (sample_nums, stationID, timestep, features). '
          'Features include: t2m, rh2m and w10m')
    self.pred_result = pred_result
    self.pred_var_result = pred_var_result
    #self.pred_std_result = np.sqrt(np.exp(self.pred_var_result[:, :, i, j]))  # Calculate standard deviation
    return pred_result, pred_var_result
Example #18
Source File: object_detection_evaluation.py From DOTA_models with Apache License 2.0
def evaluate(self):
    """Compute evaluation result.

    Returns:
      average_precision_per_class: float numpy array of average precision for
          each class.
      mean_ap: mean average precision of all classes, float scalar
      precisions_per_class: List of precisions, each precision is a float numpy
          array
      recalls_per_class: List of recalls, each recall is a float numpy array
      corloc_per_class: numpy float array
      mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
        logging.warn(
            'The following classes have no ground truth examples: %s',
            np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
        if self.num_gt_instances_per_class[class_index] == 0:
            continue
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
        precision, recall = metrics.compute_precision_recall(
            scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
        self.precisions_per_class.append(precision)
        self.recalls_per_class.append(recall)
        average_precision = metrics.compute_average_precision(precision, recall)
        self.average_precision_per_class[class_index] = average_precision
    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)
    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc)
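The np.squeeze(np.argwhere(...)) idiom in the warning above is a common one: argwhere on a 1-D condition returns a (k, 1) column of indices, and squeeze flattens it for readable logging (with exactly one match it degenerates to a 0-d array):

import numpy as np

num_gt = np.array([3, 0, 5, 0])
print(np.argwhere(num_gt == 0))              # [[1] [3]] -- shape (2, 1)
print(np.squeeze(np.argwhere(num_gt == 0)))  # [1 3]     -- shape (2,)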
Example #19
Source File: test_utils_test.py From DOTA_models with Apache License 2.0
def test_diagonal_gradient_image(self):
    """Tests if a good pyramid image is created."""
    pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)

    # Test which is easy to understand.
    expected_first_channel = np.array([[3, 2, 1, 0],
                                       [4, 3, 2, 1],
                                       [5, 4, 3, 2]], dtype=np.float32)
    self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
                        expected_first_channel)

    # Actual test.
    expected_image = np.array([[[3, 30], [2, 20], [1, 10], [0, 0]],
                               [[4, 40], [3, 30], [2, 20], [1, 10]],
                               [[5, 50], [4, 40], [3, 30], [2, 20]]],
                              dtype=np.float32)
    self.assertAllEqual(pyramid_image, expected_image)
Example #20
Source File: classify_image.py From DOTA_models with Apache License 2.0
def run_inference_on_image(image):
    """Runs inference on an image.

    Args:
      image: Image file name.

    Returns:
      Nothing
    """
    if not tf.gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image)
    image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Creates graph from saved GraphDef.
    create_graph()

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across
        #   1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #   float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #   encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        # Creates node ID --> English string lookup.
        node_lookup = NodeLookup()

        top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))
Example #21
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def load_nifti(filename, to='auto'):
    '''
    load_nifti(filename) yields the Nifti1Image or Nifti2Image referenced by the
      given filename by using the nibabel load function.

    The optional argument to may be used to coerce the resulting data to a
    particular format; the following arguments are understood:
      * 'header' will yield just the image header
      * 'data' will yield the image's data-array
      * 'field' will yield a squeezed version of the image's data-array and
        will raise an error if the data object has more than 2 non-unitary
        dimensions (appropriate for loading surface properties stored in image
        files)
      * 'affine' will yield the image's affine transformation
      * 'image' will yield the raw image object
      * 'auto' is equivalent to 'image' unless the image has no more than 2
        non-unitary dimensions, in which case it is assumed to be a
        surface-field and the return value is equivalent to the 'field' value.
    '''
    img = nib.load(filename)
    to = to.lower()
    if   to == 'image':  return img
    elif to == 'data':   return img.dataobj
    elif to == 'affine': return img.affine
    elif to == 'header': return img.header
    elif to == 'field':
        dat = np.squeeze(np.asarray(img.dataobj))
        if len(dat.shape) > 2:
            raise ValueError('image requested as field has more than 2 non-unitary dimensions')
        return dat
    elif to in ['auto', 'automatic']:
        dims = set(np.shape(img.dataobj))
        # 1 or 2 non-unitary dimensions plus at least one unit axis: treat as a field
        if 1 < len(dims) < 4 and 1 in dims:
            return np.squeeze(np.asarray(img.dataobj))
        else:
            return img
    else:
        raise ValueError('unrecognized \'to\' argument \'%s\'' % to)
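To see the 'field' coercion in action, here is a minimal sketch, assuming nibabel is installed; the file path, data, and array size are purely illustrative, not from the project. A per-vertex property stored as a (1, 1, 1, N) volume comes back as a plain 1-D array:

import numpy as np
import nibabel as nib

# build a (1, 1, 1, N) image of the kind often used for surface properties
dat = np.random.rand(1, 1, 1, 163842).astype(np.float32)
nib.save(nib.Nifti1Image(dat, affine=np.eye(4)), '/tmp/prop.nii.gz')

vals = load_nifti('/tmp/prop.nii.gz', to='field')
print(vals.shape)   # (163842,) -- all unit axes squeezed away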
Example #22
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def jac(self):
    '''
    pf.jac() yields a jacobian calculation function for the given potential
      function pf that is appropriate for passing to a minimizer.
    '''
    def _jacobian(x):
        dz = self.jacobian(x)
        if sps.issparse(dz): dz = dz.toarray()
        dz = np.asarray(dz)
        return np.squeeze(dz)
    return _jacobian
Example #23
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def __call__(self, params):
    '''
    pf(params) yields the tuple (z, dz) where z is the potential value at the
      given parameters vector, params, and dz is the vector of the potential
      gradient.
    '''
    z = self.value(params)
    dz = self.jacobian(params)
    if sps.issparse(dz): dz = dz.toarray()
    z = np.squeeze(z)
    dz = np.squeeze(dz)
    return (z, dz)
Example #24
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def load_mgh(filename, to='auto'):
    '''
    load_mgh(filename) yields the MGHImage referenced by the given filename by
      using the nibabel.freesurfer.mghformat.load function.

    The optional argument 'to' may be used to coerce the resulting data to a
    particular format; the following arguments are understood:
      * 'header' will yield just the image header
      * 'data' will yield the image's data-array
      * 'field' will yield a squeezed version of the image's data-array and
        will raise an error if the data object has more than 2 non-unitary
        dimensions (appropriate for loading surface properties stored in image
        files)
      * 'affine' will yield the image's affine transformation
      * 'image' will yield the raw image object
      * 'auto' is equivalent to 'image' unless the image has no more than 2
        non-unitary dimensions, in which case it is assumed to be a
        surface-field and the return value is equivalent to the 'field' value.
    '''
    img = fsmgh.load(filename)
    to = to.lower()
    if   to == 'image':  return img
    elif to == 'data':   return img.dataobj
    elif to == 'affine': return img.affine
    elif to == 'header': return img.header
    elif to == 'field':
        dat = np.squeeze(img.dataobj)
        if len(dat.shape) > 2:
            raise ValueError('image requested as field has more than 2 non-unitary dimensions')
        return dat
    elif to in ['auto', 'automatic']:
        dims = set(img.dataobj.shape)
        if 1 < len(dims) < 4 and 1 in dims:
            return np.squeeze(img.dataobj)
        else:
            return img
    else:
        raise ValueError('unrecognized \'to\' argument \'%s\'' % to)
Example #25
Source File: files.py From neuropythy with GNU Affero General Public License v3.0
def load_cifti(filename, to='auto'):
    '''
    load_cifti(filename) yields the cifti image referenced by the given filename
      by using the nibabel load function.

    The optional argument to may be used to coerce the resulting data to a
    particular format; the following arguments are understood:
      * 'header' will yield just the image header
      * 'data' will yield the image's data-array
      * 'field' will yield a squeezed version of the image's data-array and
        will raise an error if the data object has more than 2 non-unitary
        dimensions (appropriate for loading surface properties stored in image
        files)
      * 'image' will yield the raw image object
      * 'auto' is equivalent to 'image' unless the image has no more than 2
        non-unitary dimensions, in which case it is assumed to be a
        surface-field and the return value is equivalent to the 'field' value.
    '''
    img = nib.load(filename)
    if not isinstance(img, nib.cifti2.Cifti2Image):
        raise ValueError('given file is not a cifti image')
    to = 'auto' if to is None else to.lower()
    if   to == 'image':  return img
    elif to == 'data':   return img.dataobj
    elif to == 'header': return img.header
    elif to == 'field':
        dat = np.squeeze(np.asarray(img.dataobj))
        if len(dat.shape) > 2:
            raise ValueError('image requested as field has more than 2 non-unitary dimensions')
        return dat
    elif to in ['auto', 'automatic']:
        dims = set(np.shape(img.dataobj))
        if 1 < len(dims) < 4 and 1 in dims:
            return np.squeeze(np.asarray(img.dataobj))
        else:
            return img
    else:
        raise ValueError('unrecognized \'to\' argument \'%s\'' % to)
Example #26
Source File: files.py From neuropythy with GNU Affero General Public License v3.0
def gifti_to_array(gii):
    '''
    gifti_to_array(gii) yields the squeezed array of data contained in the
      given gifti object, gii. Note that if gii does not contain simple data in
      its darray object, then this will produce undefined results. This
      operation is effectively equivalent to:
      np.squeeze([x.data for x in gii.darrays]).

    gifti_to_array(gii_filename) is equivalent to
      gifti_to_array(neuropythy.load(gii_filename)).
    '''
    if   pimms.is_str(gii): return gifti_to_array(ny.load(gii, 'gifti'))
    elif pimms.is_nparray(gii): return gii  # already an array
    elif isinstance(gii, nib.gifti.gifti.GiftiImage):
        return np.squeeze(np.asarray([x.data for x in gii.darrays]))
    else:
        raise ValueError('Could not understand argument to gifti_to_array')