Python numpy.expand_dims() Examples
The following are code examples of numpy.expand_dims(), drawn from open-source projects. Each example lists its source file, project, and license, so you can go back to the original project or source file for context. You may also want to check out the other available functions and classes of the numpy module.
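Before diving into the project examples, here is a minimal sketch of the function itself (pure NumPy, nothing project-specific assumed): np.expand_dims(a, axis) returns a view of a with a new axis of length 1 inserted at the given position, so the result's ndim is a.ndim + 1.

import numpy as np

a = np.array([1, 2, 3])                  # shape (3,)
print(np.expand_dims(a, axis=0).shape)   # (1, 3): row vector
print(np.expand_dims(a, axis=1).shape)   # (3, 1): column vector
print(np.expand_dims(a, axis=-1).shape)  # (3, 1): negative axes count from the end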
Example #1
Source File: imagenet.py From vergeml with MIT License

def predict(self, f, k=5, resize_mode='fill'):
    from keras.preprocessing import image
    from vergeml.img import resize_image

    filename = os.path.basename(f)

    if not os.path.exists(f):
        return dict(filename=filename, prediction=[])

    img = image.load_img(f)
    img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = self.preprocess_input(x)
    preds = self.model.predict(x)
    pred = self._decode(preds, top=k)[0]
    prediction = [dict(probability=np.asscalar(perc), label=klass)
                  for _, klass, perc in pred]
    return dict(filename=filename, prediction=prediction)
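This is the most common use of expand_dims on this page: adding a leading batch axis so one image can be passed to model.predict, which expects a batch. A minimal sketch (array sizes are illustrative, not from the project):

import numpy as np

x = np.zeros((224, 224, 3))        # one image, (H, W, C)
batch = np.expand_dims(x, axis=0)  # (1, H, W, C)
assert batch.shape == (1, 224, 224, 3)
assert batch.shape == x[np.newaxis].shape == x[None].shape  # equivalent spellings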
Example #2
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0

def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size,
                 certain_id=None, certain_feature=None):
    max_i, _, max_j, _ = data_inputs.shape  # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
    if certain_id == None and certain_feature == None:
        id_ = np.random.randint(max_j, size=batch_size)
        i = np.random.randint(max_i, size=batch_size)
        batch_inputs = data_inputs[i, :, id_, :]
        batch_ouputs = ground_truth[i, :, id_, :]
        batch_ruitu = ruitu_inputs[i, :, id_, :]
        # id used for embedding
        expd_id = np.expand_dims(id_, axis=1)
        batch_ids = np.tile(expd_id, (1, 37))
        #batch_time =
    elif certain_id != None:
        pass
    return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
Example #3
Source File: features.py From vergeml with MIT License

def transform(self, sample):
    if not self.model:
        if not self.architecture.startswith("@"):
            self.preprocess_input = get_preprocess_input(self.architecture)
            self.model = get_imagenet_architecture(self.architecture, self.variant, self.image_size,
                                                   self.alpha, self.output_layer)
        else:
            # TODO get image size!
            self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
            self.preprocess_input = generic_preprocess_input

    x = sample.x
    # TODO better resize
    x = x.convert('RGB')
    x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
    # x = x.resize((self.image_size, self.image_size))
    x = np.asarray(x)
    x = np.expand_dims(x, axis=0)
    x = self.preprocess_input(x)
    features = self.model.predict(x)
    features = features.flatten()
    sample.x = features
    sample = super().transform(sample)
    return sample
Example #4
Source File: formating.py From mmdetection with Apache License 2.0

def __call__(self, results):
    """Call function to convert image in results to :obj:`torch.Tensor` and
    transpose the channel order.

    Args:
        results (dict): Result dict contains the image data to convert.

    Returns:
        dict: The result dict contains the image converted
            to :obj:`torch.Tensor` and transposed to (C, H, W) order.
    """
    for key in self.keys:
        img = results[key]
        if len(img.shape) < 3:
            img = np.expand_dims(img, -1)
        results[key] = to_tensor(img.transpose(2, 0, 1))
    return results
Example #5
Source File: features.py From vergeml with MIT License

def transform(self, sample):
    if not self.model:
        if not self.architecture.startswith("@"):
            _, self.preprocess_input, self.model = \
                get_imagenet_architecture(self.architecture, self.variant, self.size,
                                          self.alpha, self.output_layer)
        else:
            self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
            self.preprocess_input = generic_preprocess_input

    x = sample.x
    x = x.convert('RGB')
    x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
    # x = x.resize((self.image_size, self.image_size))
    x = np.asarray(x)
    x = np.expand_dims(x, axis=0)
    x = self.preprocess_input(x)
    features = self.model.predict(x)
    features = features.flatten()
    sample.x = features
    sample.y = None
    return sample
Example #6
Source File: von_mises_stress.py From fenics-topopt with MIT License

def calculate_fdiff_stress(self, x, u, nu, side=1, dx=1e-6):
    """
    Calculate the derivative of the Von Mises stress using finite
    differences given the densities x, displacements u, and Young's
    modulus nu. Optionally, provide the side length (default: 1) and
    delta x (default: 1e-6).
    """
    ds = self.calculate_diff_stress(x, u, nu, side)
    dsf = numpy.zeros(x.shape)
    x = numpy.expand_dims(x, -1)
    for i in range(x.shape[0]):
        delta = scipy.sparse.coo_matrix(([dx], [[i], [0]]), shape=x.shape)
        s1 = self.calculate_stress((x + delta.A).squeeze(), u, nu, side)
        s2 = self.calculate_stress((x - delta.A).squeeze(), u, nu, side)
        dsf[i] = ((s1 - s2) / (2. * dx))[i]
    print("finite differences: {:g}".format(numpy.linalg.norm(dsf - ds)))
    return dsf
Example #7
Source File: vfn_eval.py From view-finding-network with GNU General Public License v3.0

def evaluate_sliding_window(img_filename, crops):
    img = io.imread(img_filename).astype(np.float32)/255
    if img.ndim == 2:  # Handle B/W images
        img = np.expand_dims(img, axis=-1)
        img = np.repeat(img, 3, 2)

    img_crops = np.zeros((batch_size, 227, 227, 3))
    for i in xrange(len(crops)):
        crop = crops[i]
        img_crop = transform.resize(img[crop[1]:crop[1]+crop[3], crop[0]:crop[0]+crop[2]],
                                    (227, 227)) - 0.5
        img_crop = np.expand_dims(img_crop, axis=0)
        img_crops[i, :, :, :] = img_crop

    # compute ranking scores
    scores = sess.run([score_func], feed_dict={image_placeholder: img_crops})

    # find the optimal crop
    idx = np.argmax(scores[:len(crops)])
    best_window = crops[idx]

    # return the best crop
    return (best_window[0], best_window[1], best_window[2], best_window[3])
Example #8
Source File: dataloader_m.py From models with MIT License

def _prepro_cpg(self, states, dists):
    """Preprocess the state and distance of neighboring CpG sites."""
    prepro_states = []
    prepro_dists = []
    for state, dist in zip(states, dists):
        nan = state == dat.CPG_NAN
        if np.any(nan):
            state[nan] = np.random.binomial(1, state[~nan].mean(), nan.sum())
            dist[nan] = self.cpg_max_dist
        dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
        prepro_states.append(np.expand_dims(state, 1))
        prepro_dists.append(np.expand_dims(dist, 1))
    prepro_states = np.concatenate(prepro_states, axis=1)
    prepro_dists = np.concatenate(prepro_dists, axis=1)
    if self.cpg_wlen:
        center = prepro_states.shape[2] // 2
        delta = self.cpg_wlen // 2
        tmp = slice(center - delta, center + delta)
        prepro_states = prepro_states[:, :, tmp]
        prepro_dists = prepro_dists[:, :, tmp]
    return (prepro_states, prepro_dists)
Example #9
Source File: model.py From models with MIT License

def predict_on_batch(self, inputs):
    # write test fasta file
    temp_input = tempfile.NamedTemporaryFile(suffix=".txt")
    test_fname = temp_input.name
    encode_sequence_into_fasta_file(ofname=test_fname, seq=inputs.tolist())

    # test gkmsvm
    temp_ofp = tempfile.NamedTemporaryFile(suffix=".txt")
    threads_option = '-T %s' % (str(self.threads))
    verbosity_option = '-v 0'
    command = ' '.join(['gkmpredict', test_fname, self.model_file,
                        temp_ofp.name, threads_option, verbosity_option])
    #process = subprocess.Popen(command, shell=True)
    #process.wait()  # wait for it to finish
    exit_code = os.system(command)
    temp_input.close()
    assert exit_code == 0

    # get classification results
    temp_ofp.seek(0)
    y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
    temp_ofp.close()
    return np.expand_dims(y, 1)
Example #10
Source File: dataset.py From neural-combinatorial-optimization-rl-tensorflow with MIT License

def reward(tsptw_sequence, speed):
    # Convert sequence to tour (end=start)
    tour = np.concatenate((tsptw_sequence, np.expand_dims(tsptw_sequence[0], 0)))
    # Compute tour length
    inter_city_distances = np.sqrt(np.sum(np.square(tour[:-1, :2] - tour[1:, :2]), axis=1))
    distance = np.sum(inter_city_distances)
    # Compute delivery times at each city and count late cities
    elapsed_time = -10
    late_cities = 0
    for i in range(tsptw_sequence.shape[0] - 1):
        travel_time = inter_city_distances[i]/speed
        tw_open = tour[i+1, 2]
        tw_close = tour[i+1, 3]
        elapsed_time += travel_time
        if elapsed_time <= tw_open:
            elapsed_time = tw_open
        elif elapsed_time > tw_close:
            late_cities += 1
    # Reward
    return distance + 100000000*late_cities

# Swap city[i] with city[j] in sequence
Example #11
Source File: vis_utils.py From ACAN with MIT License

def colored_depthmap(depth, d_min=None, d_max=None, cmap=plt.cm.jet):
    """
    Parameters
    ----------
    depth : numpy.ndarray
        shape [batch_size, h, w] or [h, w]
    """
    if len(depth.shape) == 2:
        depth = np.expand_dims(depth, 0)
    if d_min is None:
        d_min = np.min(depth)
    if d_max is None:
        d_max = np.max(depth)
    depth = (depth - d_min) / (d_max - d_min)
    b, h, w = depth.shape
    depth_color = np.zeros((b, h, w, 3))
    for d in range(depth_color.shape[0]):
        depth_color[d] = cmap(depth[d])[:, :, :3]
    return np.asarray(255 * depth_color, dtype=np.uint8)
Example #12
Source File: run_audio_attack.py From Black-Box-Audio with MIT License

def __init__(self, input_wave_file, output_wave_file, target_phrase):
    self.pop_size = 100
    self.elite_size = 10
    self.mutation_p = 0.005
    self.noise_stdev = 40
    self.noise_threshold = 1
    self.mu = 0.9
    self.alpha = 0.001
    self.max_iters = 3000
    self.num_points_estimate = 100
    self.delta_for_gradient = 100
    self.delta_for_perturbation = 1e3
    self.input_audio = load_wav(input_wave_file).astype(np.float32)
    self.pop = np.expand_dims(self.input_audio, axis=0)
    self.pop = np.tile(self.pop, (self.pop_size, 1))
    self.output_wave_file = output_wave_file
    self.target_phrase = target_phrase
    self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase]))
Example #13
Source File: metrics_test.py From fine-lm with MIT License

def testSigmoidAccuracyOneHot(self):
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [-1., 1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [1, 0],
        [1, 0],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)
    with self.test_session() as session:
        score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
        session.run(tf.global_variables_initializer())
        session.run(tf.local_variables_initializer())
        s = session.run(score)
    self.assertEqual(s, 0.5)
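The test above inserts two singleton axes with chained calls. On NumPy 1.18 and newer, expand_dims also accepts a tuple of axes, so the same reshape fits in one call; a minimal sketch:

import numpy as np

logits = np.zeros((4, 2))
chained = np.expand_dims(np.expand_dims(logits, 1), 1)  # (4, 1, 1, 2)
tupled = np.expand_dims(logits, (1, 2))                 # (4, 1, 1, 2), NumPy >= 1.18
assert chained.shape == tupled.shape == (4, 1, 1, 2)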
Example #14
Source File: optimizers.py From DOTA_models with Apache License 2.0

def optimize(self, sess, feed_dict):
    reg_input, reg_weight, old_values, targets = sess.run(
        [self.inputs, self.regression_weight, self.values, self.targets],
        feed_dict=feed_dict)
    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    # taken from rllab
    reg_coeff = 1e-5
    for _ in range(5):
        best_fit_weight = np.linalg.lstsq(
            reg_input.T.dot(reg_input) + reg_coeff * np.identity(reg_input.shape[1]),
            reg_input.T.dot(intended_values))[0]
        if not np.any(np.isnan(best_fit_weight)):
            break
        reg_coeff *= 10

    if len(best_fit_weight.shape) == 1:
        best_fit_weight = np.expand_dims(best_fit_weight, -1)

    sess.run(self.update_regression_weight,
             feed_dict={self.new_regression_weight: best_fit_weight})
Example #15
Source File: np_box_ops.py From DOTA_models with Apache License 2.0

def ioa(boxes1, boxes2):
    """Computes pairwise intersection-over-area between box collections.

    Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
    their intersection area over box2's area. Note that ioa is not symmetric,
    that is, IOA(box1, box2) != IOA(box2, box1).

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise ioa scores.
    """
    intersect = intersection(boxes1, boxes2)
    areas = np.expand_dims(area(boxes2), axis=0)
    return intersect / areas
Example #16
Source File: np_box_ops.py From DOTA_models with Apache License 2.0

def iou(boxes1, boxes2):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise iou scores.
    """
    intersect = intersection(boxes1, boxes2)
    area1 = area(boxes1)
    area2 = area(boxes2)
    union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect
    return intersect / union
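The union line above is the standard expand_dims broadcasting pattern for pairwise computations: turning a length-N vector into an [N, 1] column and a length-M vector into a [1, M] row makes their sum broadcast to an [N, M] matrix. A minimal sketch with made-up areas:

import numpy as np

area1 = np.array([2., 4., 6.])  # areas of N = 3 boxes
area2 = np.array([1., 3.])      # areas of M = 2 boxes
pairwise = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0)
assert pairwise.shape == (3, 2)  # pairwise[i, j] == area1[i] + area2[j]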
Example #17
Source File: depth_utils.py From DOTA_models with Apache License 2.0

def get_point_cloud_from_z(Y, camera_matrix):
    """Projects the depth image Y into a 3D point cloud.
    Inputs:
        Y is ...xHxW
        camera_matrix
    Outputs:
        X is positive going right
        Y is positive into the image
        Z is positive up in the image
        XYZ is ...xHxWx3
    """
    x, z = np.meshgrid(np.arange(Y.shape[-1]),
                       np.arange(Y.shape[-2]-1, -1, -1))
    for i in range(Y.ndim - 2):
        x = np.expand_dims(x, axis=0)
        z = np.expand_dims(z, axis=0)
    X = (x - camera_matrix.xc) * Y / camera_matrix.f
    Z = (z - camera_matrix.zc) * Y / camera_matrix.f
    XYZ = np.concatenate((X[..., np.newaxis], Y[..., np.newaxis], Z[..., np.newaxis]),
                         axis=X.ndim)
    return XYZ
Example #18
Source File: envs.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def preprocess(self, img):
    """
    Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size) float vector.
    """
    # Crop, down-sample, erase background and set foreground to 1.
    # See https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
    img = img[35:195]
    img = img[::2, ::2, 0]
    img[img == 144] = 0
    img[img == 109] = 0
    img[img != 0] = 1
    curr = np.expand_dims(img.astype(np.float).ravel(), axis=0)
    # Subtract the last preprocessed image.
    diff = (curr - self.prev if self.prev is not None
            else np.zeros((1, curr.shape[1])))
    self.prev = curr
    return diff
Example #19
Source File: image_segmentaion.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def get_data(img_path):
    """get the (1, 3, h, w) np.array data for the supplied image

    Args:
        img_path (string): the input image path

    Returns:
        np.array: image data in a (1, 3, h, w) shape
    """
    mean = np.array([123.68, 116.779, 103.939])  # (R, G, B)
    img = Image.open(img_path)
    img = np.array(img, dtype=np.float32)
    reshaped_mean = mean.reshape(1, 1, 3)
    img = img - reshaped_mean
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = np.expand_dims(img, axis=0)
    return img
Example #20
Source File: kitti_common.py From kitti-object-eval-python with MIT License

def iou(boxes1, boxes2, add1=False):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise iou scores.
    """
    intersect = intersection(boxes1, boxes2, add1)
    area1 = area(boxes1, add1)
    area2 = area(boxes2, add1)
    union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect
    return intersect / union
Example #21
Source File: prepro.py From Named-Entity-Recognition-with-Bidirectional-LSTM-CNNs with GNU General Public License v3.0

def iterate_minibatches(dataset, batch_len):
    start = 0
    for i in batch_len:
        tokens = []
        caseing = []
        char = []
        labels = []
        data = dataset[start:i]
        start = i
        for dt in data:
            t, c, ch, l = dt
            l = np.expand_dims(l, -1)
            tokens.append(t)
            caseing.append(c)
            char.append(ch)
            labels.append(l)
        yield np.asarray(labels), np.asarray(tokens), np.asarray(caseing), np.asarray(char)
Example #22
Source File: randomproj.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def _get_mask(self, idx, in_data):
    """Returns the mask by which to multiply the parts of the embedding layer.
    In this version, we have no weights to apply.
    """
    mask = idx >= 0  # bool False for -1 values that should be removed. shape=(b,mnz)
    mask = np.expand_dims(mask, 2)  # shape = (b,mnz,1)
    mask = np.repeat(mask, self._proj_dim, axis=2)  # shape = (b,mnz,d)
    return mask
Example #23
Source File: cmp_utils.py From DOTA_models with Apache License 2.0

def get_visual_frustum(map_size, shape_like, expand_dims=[0, 0]):
    with tf.name_scope('visual_frustum'):
        l = np.tril(np.ones(map_size))
        l = l + l[:, ::-1]
        l = (l == 2).astype(np.float32)
        for e in expand_dims:
            l = np.expand_dims(l, axis=e)
        confs_probs = tf.constant(l, dtype=tf.float32)
        confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs
    return confs_probs
Example #24
Source File: inference_pb.py From ARU-Net with GNU General Public License v2.0

def load_img(self, path, scale, mode):
    aImg = misc.imread(path, mode=mode)
    sImg = misc.imresize(aImg, scale, interp='bicubic')
    fImg = sImg
    if len(sImg.shape) == 2:
        fImg = np.expand_dims(fImg, 2)
    fImg = np.expand_dims(fImg, 0)
    return fImg
Example #25
Source File: dataset.py From PolarSeg with BSD 3-Clause "New" or "Revised" License

def __getitem__(self, index):
    raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
    if self.imageset == 'test':
        annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
    else:
        annotated_data = np.fromfile(self.im_idx[index].replace('velodyne', 'labels')[:-3] + 'label',
                                     dtype=np.int32).reshape((-1, 1))
        annotated_data = annotated_data & 0xFFFF  # keep only the lower 16 bits (the semantic label)
        annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
    data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
    if self.return_ref:
        data_tuple += (raw_data[:, 3],)
    return data_tuple
Example #26
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def _read_img(self, img_name, label_name):
    img = Image.open(os.path.join(self.root_dir, img_name))
    label = Image.open(os.path.join(self.root_dir, label_name))
    assert img.size == label.size
    img = np.array(img, dtype=np.float32)  # (h, w, c)
    label = np.array(label)  # (h, w)
    if self.cut_off_size is not None:
        max_hw = max(img.shape[0], img.shape[1])
        min_hw = min(img.shape[0], img.shape[1])
        if min_hw > self.cut_off_size:
            rand_start_max = int(np.random.uniform(0, max_hw - self.cut_off_size - 1))
            rand_start_min = int(np.random.uniform(0, min_hw - self.cut_off_size - 1))
            if img.shape[0] == max_hw:
                img = img[rand_start_max : rand_start_max + self.cut_off_size,
                          rand_start_min : rand_start_min + self.cut_off_size]
                label = label[rand_start_max : rand_start_max + self.cut_off_size,
                              rand_start_min : rand_start_min + self.cut_off_size]
            else:
                img = img[rand_start_min : rand_start_min + self.cut_off_size,
                          rand_start_max : rand_start_max + self.cut_off_size]
                label = label[rand_start_min : rand_start_min + self.cut_off_size,
                              rand_start_max : rand_start_max + self.cut_off_size]
        elif max_hw > self.cut_off_size:
            rand_start = int(np.random.uniform(0, max_hw - min_hw - 1))
            if img.shape[0] == max_hw:
                img = img[rand_start : rand_start + min_hw, :]
                label = label[rand_start : rand_start + min_hw, :]
            else:
                img = img[:, rand_start : rand_start + min_hw]
                label = label[:, rand_start : rand_start + min_hw]
    reshaped_mean = self.mean.reshape(1, 1, 3)
    img = img - reshaped_mean
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)  # (c, h, w)
    img = np.expand_dims(img, axis=0)  # (1, c, h, w)
    label = np.array(label)  # (h, w)
    label = np.expand_dims(label, axis=0)  # (1, h, w)
    return (img, label)
Example #27
Source File: gradcam.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def to_grayscale(cv2im):
    """Convert gradients to grayscale. This gives a saliency map."""
    # How strongly does each position activate the output
    grayscale_im = np.sum(np.abs(cv2im), axis=0)
    # Normalize between min and 99th percentile
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)
    grayscale_im = np.expand_dims(grayscale_im, axis=0)
    return grayscale_im
Example #28
Source File: generate_atomic_greedy.py From comet-commonsense with Apache License 2.0

def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch
Example #29
Source File: generate_conceptnet_beam_search.py From comet-commonsense with Apache License 2.0

def make_batch(X):
    X = np.array(X)
    assert X.ndim in [1, 2]
    if X.ndim == 1:
        X = np.expand_dims(X, axis=0)
    pos_enc = np.arange(n_vocab + n_special, n_vocab + n_special + X.shape[-1])
    pos_enc = np.expand_dims(pos_enc, axis=0)
    batch = np.stack([X, pos_enc], axis=-1)
    batch = torch.tensor(batch, dtype=torch.long).to(device)
    return batch