Python numpy.resize() Examples
The following are 30 code examples of numpy.resize().
You can go to the original project or source file by following the links above each example. You may also want to check out all other available functions and classes of the numpy module.
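Before walking through the project examples, here is a minimal sketch of what numpy.resize() itself does: unlike reshape, the requested shape does not have to match the number of elements; when enlarging, the flattened data is repeated to fill the new shape, and when shrinking it is truncated. (Note that the free function np.resize differs from the ndarray.resize method, which pads with zeros instead.)

import numpy as np

a = np.array([[1, 2], [3, 4]])

# Enlarging repeats the flattened data until the new shape is filled.
print(np.resize(a, (2, 4)))
# [[1 2 3 4]
#  [1 2 3 4]]

# Shrinking truncates the flattened data.
print(np.resize(a, (3,)))
# [1 2 3]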
Example #1
Source File: run.py From mlnd_DeepTesla with GNU General Public License v3.0 | 6 votes |
def img_pre_process(img):
    """
    Processes the image and returns it
    :param img: The image to be processed
    :return: Returns the processed image
    """
    ## Chop off 1/3 from the top and cut bottom 150px (which contains the head of car)
    shape = img.shape
    img = img[int(shape[0] / 3):shape[0] - 150, 0:shape[1]]
    ## Resize the image
    img = cv2.resize(img, (params.FLAGS.img_w, params.FLAGS.img_h),
                     interpolation=cv2.INTER_AREA)
    ## Return the image sized as a 4D array
    return np.resize(img, (params.FLAGS.img_w, params.FLAGS.img_h, params.FLAGS.img_c))

## Process video
Example #2
Source File: load_flow.py From DenseMatchingBenchmark with MIT License | 6 votes |
def load_flo(file_path):
    """
    Read .flo file in MiddleBury format
    Code adapted from:
    http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
    WARNING: this will work on little-endian architectures (eg Intel x86) only!
    Args:
        file_path string: file path (absolute)
    Returns:
        flow (numpy.array): data of image in (Height, Width, 2) layout
    """
    with open(file_path, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        assert(magic == 202021.25)
        w = int(np.fromfile(f, np.int32, count=1))
        h = int(np.fromfile(f, np.int32, count=1))
        # print('Reading %d x %d flo file\n' % (w, h))
        flow = np.fromfile(f, np.float32, count=2 * w * h)
        # Reshape data into 3D array (columns, rows, bands)
        # The reshape here is for visualization, the original code is (w,h,2)
        flow = np.resize(flow, (h, w, 2))
    return flow
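In this loader (and in the similar .flo readers in the examples below), np.fromfile returns exactly 2 * w * h values, so np.resize only changes the shape and never repeats or truncates data; a plain reshape would behave the same for a well-formed file. A minimal sketch of that equivalence, assuming a complete read:

import numpy as np

h, w = 2, 3
flow = np.arange(2 * w * h, dtype=np.float32)   # stand-in for the values read from the file
assert np.array_equal(np.resize(flow, (h, w, 2)), flow.reshape(h, w, 2))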
Example #3
Source File: flowlib.py From conditional-motion-propagation with MIT License | 6 votes |
def read_flo_file(filename, memcached=False):
    """
    Read from Middlebury .flo file
    :param flow_file: name of the flow file
    :return: optical flow data in matrix
    """
    if memcached:
        filename = io.BytesIO(filename)
    f = open(filename, 'rb')
    magic = np.fromfile(f, np.float32, count=1)[0]
    data2d = None

    if 202021.25 != magic:
        print('Magic number incorrect. Invalid .flo file')
    else:
        w = np.fromfile(f, np.int32, count=1)[0]
        h = np.fromfile(f, np.int32, count=1)[0]
        data2d = np.fromfile(f, np.float32, count=2 * w * h)
        # reshape data into 3D array (columns, rows, channels)
        data2d = np.resize(data2d, (h, w, 2))
    f.close()
    return data2d

# fast resample layer
Example #4
Source File: test_pairwise.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_check_preserve_type():
    # Ensures that type float32 is preserved.
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)

    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)

    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)

    # mismatched A
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float), XB)
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)

    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(np.float))
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)
Example #5
Source File: test_boolean.py From vnpy_crypto with MIT License | 6 votes |
def test_broadcast(size, mask, item, box):
    selection = np.resize(mask, size)
    data = np.arange(size, dtype=float)
    # Construct the expected series by taking the source
    # data or item based on the selection
    expected = Series([item if use_item else data[i]
                       for i, use_item in enumerate(selection)])

    s = Series(data)
    s[selection] = box(item)
    assert_series_equal(s, expected)

    s = Series(data)
    result = s.where(~selection, box(item))
    assert_series_equal(result, expected)

    s = Series(data)
    result = s.mask(selection, box(item))
    assert_series_equal(result, expected)
Example #6
Source File: flow.py From weakalign with MIT License | 6 votes |
def read_flo_file(filename, verbose=False):
    """
    Read from .flo optical flow file (Middlebury format)
    :param flow_file: name of the flow file
    :return: optical flow data in matrix

    adapted from https://github.com/liruoteng/OpticalFlowToolkit/
    """
    f = open(filename, 'rb')
    magic = np.fromfile(f, np.float32, count=1)
    data2d = None

    if 202021.25 != magic:
        raise TypeError('Magic number incorrect. Invalid .flo file')
    else:
        w = np.fromfile(f, np.int32, count=1)
        h = np.fromfile(f, np.int32, count=1)
        if verbose:
            print("Reading %d x %d flow file in .flo format" % (h, w))
        data2d = np.fromfile(f, np.float32, count=int(2 * w * h))
        # reshape data into 3D array (columns, rows, channels)
        data2d = np.resize(data2d, (h[0], w[0], 2))
    f.close()
    return data2d
Example #7
Source File: prunable_nn_test.py From prunnable-layers-pytorch with GNU General Public License v3.0 | 6 votes |
def test_PLinearDropInputs_ShouldDropRightParams(self):
    dropped_index = 0

    # assume input is 2x2x2, 2 layers of 2x2
    input_shape = (2, 2, 2)
    module = pnn.PLinear(8, 10)

    old_num_features = module.in_features
    old_weight = module.weight.data.cpu().numpy()
    resized_old_weight = np.resize(old_weight, (module.out_features, *input_shape))

    module.drop_inputs(input_shape, dropped_index)
    new_shape = module.weight.size()

    # ensure that the chosen index is dropped
    expected_weight = np.resize(np.delete(resized_old_weight, dropped_index, 1), new_shape)
    output = module.weight.data.cpu().numpy()
    self.assertTrue(np.array_equal(output, expected_weight))

    # ensure num features is reduced
    self.assertTrue(module.in_features, old_num_features - 1)
Example #8
Source File: flow_utils.py From swiftnet with GNU General Public License v3.0 | 6 votes |
def readFlow(fn):
    """ Read .flo file in Middlebury format"""
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy

    # WARNING: this will work on little-endian architectures (eg Intel x86) only!
    # print 'fn = %s'%(fn)
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
            return None
        else:
            w = np.fromfile(f, np.int32, count=1)
            h = np.fromfile(f, np.int32, count=1)
            # print 'Reading %d x %d flo file\n' % (w, h)
            data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
            # Reshape data into 3D array (columns, rows, bands)
            # The reshape here is for visualization, the original code is (w,h,2)
            return np.resize(data, (int(h), int(w), 2))
Example #9
Source File: nstyle.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
Example #10
Source File: contour.py From matplotlib-4-abaqus with MIT License | 5 votes |
def locate_label(self, linecontour, labelwidth):
    """
    Find a good place to plot a label (relatively flat part of the contour).
    """
    nsize = len(linecontour)
    if labelwidth > 1:
        xsize = int(np.ceil(nsize / labelwidth))
    else:
        xsize = 1
    if xsize == 1:
        ysize = nsize
    else:
        ysize = int(labelwidth)

    XX = np.resize(linecontour[:, 0], (xsize, ysize))
    YY = np.resize(linecontour[:, 1], (xsize, ysize))
    # I might have fouled up the following:
    yfirst = YY[:, 0].reshape(xsize, 1)
    ylast = YY[:, -1].reshape(xsize, 1)
    xfirst = XX[:, 0].reshape(xsize, 1)
    xlast = XX[:, -1].reshape(xsize, 1)
    s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
    L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
    dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
    x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
    # print 'ind, x, y', ind, x, y

    # There must be a more efficient way...
    lc = [tuple(l) for l in linecontour]
    dind = lc.index((x, y))
    # print 'dind', dind
    # dind = list(linecontour).index((x,y))

    return x, y, dind
Example #11
Source File: reconstruct.py From Spiking-Neural-Network with Apache License 2.0 | 5 votes |
def reconst_weights(weights, num, layer, reshape_x, reshape_y):
    weights = np.array(weights)
    weights = np.reshape(weights, (reshape_x, reshape_y))
    img = np.zeros((reshape_x, reshape_y))
    for i in range(reshape_x):
        for j in range(reshape_y):
            img[i][j] = int(interp(weights[i][j], [par.w_min, par.w_max], [0, 255]))

    img = np.resize(img, (28, 28))
    cv2.imwrite('weights/layer_' + str(layer) + '_neuron_' + str(num) + '.png', img)
    return img
Example #12
Source File: flow_utils.py From swiftnet with GNU General Public License v3.0 | 5 votes |
def subsample_flow(flow, subsampling):
    dtype = flow.dtype
    u, v = [pimg.fromarray(uv.squeeze()) for uv in np.split(flow / subsampling, 2, axis=-1)]
    size = tuple([int(round(wh / subsampling)) for wh in u.size])
    u, v = u.resize(size), v.resize(size)
    return np.stack([np.array(u, dtype=dtype), np.array(v, dtype=dtype)], axis=-1)
Example #13
Source File: triangulate.py From fenics-topopt with MIT License | 5 votes |
def mesh_from_img(img):
    nv = (img.shape[0] + 1) * (img.shape[1] + 1)
    nf = img.size * 2

    v_count = 0
    f_count = 0
    V_dict = {}
    V = np.zeros([nv, 2])
    F = np.zeros([nf, 3], dtype=np.int)

    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            val = img[i, j]
            if val == 255.0:
                continue

            v_idx = []
            for v_i in [(i, j), (i + 1, j), (i, j + 1), (i + 1, j + 1)]:
                if v_i in V_dict:
                    v_idx.append(V_dict[v_i])
                else:
                    V_dict[v_i] = v_count
                    V[v_count, :] = np.array((v_i[1], -v_i[0]))
                    v_count += 1
                    v_idx.append(v_count - 1)

            v1, v2, v3, v4 = v_idx
            F[f_count, :] = np.array([v1, v2, v4])
            F[f_count + 1, :] = np.array([v1, v4, v3])
            f_count += 2

    V = np.resize(V, [v_count, 2])
    F = np.resize(F, [f_count, 3])
    return V, F
Example #14
Source File: test_pairwise.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_check_different_dimensions():
    # Ensure an error is raised if the dimensions are different.
    XA = np.resize(np.arange(45), (5, 9))
    XB = np.resize(np.arange(32), (4, 8))
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)

    XB = np.resize(np.arange(4 * 9), (4, 9))
    assert_raises(ValueError, check_paired_arrays, XA, XB)
Example #15
Source File: test_pairwise.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def test_check_XB_returned():
    # Ensure that if XA and XB are given correctly, they return as equal.
    # Check that if XB is not None, it is returned equal.
    # Note that the second dimension of XB is the same as XA.
    XA = np.resize(np.arange(40), (5, 8))
    XB = np.resize(np.arange(32), (4, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)

    XB = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_paired_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
Example #16
Source File: ddpg.py From programmable-agents_tensorflow with MIT License | 5 votes |
def train(self):
    # print "train step", self.time_step
    # Sample a random minibatch of N transitions from replay buffer
    minibatch = self.replay_buffer.get_batch(BATCH_SIZE)
    state_batch = np.asarray([data[0] for data in minibatch])
    program_order_batch = np.asarray([data[1] for data in minibatch])
    action_batch = np.asarray([data[2] for data in minibatch])
    reward_batch = np.asarray([data[3] for data in minibatch])
    next_state_batch = np.asarray([data[4] for data in minibatch])
    done_batch = np.asarray([data[5] for data in minibatch])

    # for action_dim = 1
    action_batch = np.resize(action_batch, [BATCH_SIZE, self.action_dim])

    # Calculate y_batch
    next_action_batch = self.actor_network.target_actions(next_state_batch, program_order_batch)
    q_value_batch = self.critic_network.target_q(next_state_batch, next_action_batch, program_order_batch)
    y_batch = []
    for i in range(len(minibatch)):
        if done_batch[i]:
            y_batch.append(reward_batch[i])
        else:
            y_batch.append(reward_batch[i] + GAMMA * q_value_batch[i])
    y_batch = np.resize(y_batch, [BATCH_SIZE, 1])

    # Update critic by minimizing the loss L
    self.critic_network.train(y_batch, state_batch, action_batch, program_order_batch)

    # Update the actor policy using the sampled gradient:
    action_batch_for_gradients = self.actor_network.actions(state_batch, program_order_batch)
    q_gradient_batch = self.critic_network.gradients(state_batch, action_batch_for_gradients, program_order_batch)
    self.actor_network.train(q_gradient_batch, state_batch, program_order_batch)

    # Update the target networks
    self.actor_network.update_target()
    self.critic_network.update_target()
Example #17
Source File: source.py From python-musical with MIT License | 5 votes |
def ringbuffer(data, length, decay=1.0, rate=44100):
    ''' Repeat data for 'length' amount of time, smoothing to reduce
        higher frequency oscillation. decay is the percent of amplitude decrease. '''
    phase = len(data)
    length = int(rate * length)
    out = numpy.resize(data, length)
    for i in range(phase, length):
        index = i - phase
        out[i] = (out[index] + out[index + 1]) * 0.5 * decay
    return out
Example #18
Source File: preprocessing.py From kits19.MIScnn with GNU General Public License v3.0 | 5 votes |
def resize_patch(patches_list, patch_shape):
    for i, patch in enumerate(patches_list):
        patches_list[i] = np.resize(patch, (1,) + (patch_shape) + (1,))
    return patches_list

# Remove all blank patches (with only background)
Example #19
Source File: _support.py From Computable with MIT License | 5 votes |
def colex(a, indices, axis=1):
    """\nExtracts specified indices (a list) from passed array, along passed
axis (column extraction is default).  BEWARE: A 1D array is presumed to be a
column-array (and that the whole array will be returned as a column).

Returns: the columns of a specified by indices\n"""
    if type(indices) not in [ListType, TupleType, np.ndarray]:
        indices = [indices]
    if len(np.shape(a)) == 1:
        cols = np.resize(a, [a.shape[0], 1])
    else:
        cols = np.take(a, indices, axis)
    return cols
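In colex above, np.resize(a, [a.shape[0], 1]) simply turns a 1-D array into a single column; because the element count is unchanged, a reshape would give the same result. A small sketch of that case:

import numpy as np

a = np.array([1, 2, 3])
col = np.resize(a, (a.shape[0], 1))
print(col.shape)                                 # (3, 1)
print(np.array_equal(col, a.reshape(-1, 1)))     # True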
Example #20
Source File: _support.py From Computable with MIT License | 5 votes |
def abut(source, *args):
    # comment: except for the repetition, this is equivalent to hstack.
    """\nLike the |Stat abut command.  It concatenates two arrays column-wise
and returns the result.  CAUTION:  If one array is shorter, it will be
repeated until it is as long as the other.

Format:  abut (source, args)    where args=any # of arrays
Returns: an array as long as the LONGEST array past, source appearing on the
         'left', arrays in <args> attached on the 'right'.\n"""
    source = asarray(source)
    if len(source.shape) == 1:
        width = 1
        source = np.resize(source, [source.shape[0], width])
    else:
        width = source.shape[1]
    for addon in args:
        if len(addon.shape) == 1:
            width = 1
            addon = np.resize(addon, [source.shape[0], width])
        else:
            width = source.shape[1]
        if len(addon) < len(source):
            addon = np.resize(addon, [source.shape[0], addon.shape[1]])
        elif len(source) < len(addon):
            source = np.resize(source, [addon.shape[0], source.shape[1]])
        source = np.concatenate((source, addon), 1)
    return source
Example #21
Source File: fitpack2.py From Computable with MIT License | 5 votes |
def _reset_nest(self, data, nest=None):
    n = data[10]
    if nest is None:
        k, m = data[5], len(data[0])
        nest = m + k + 1  # this is the maximum bound for nest
    else:
        if not n <= nest:
            raise ValueError("`nest` can only be increased")
    t, c, fpint, nrdata = [np.resize(data[n], nest) for n in [8, 9, 11, 12]]

    args = data[:8] + (t, c, n, fpint, nrdata, data[13])
    data = dfitpack.fpcurf1(*args)
    return data
Example #22
Source File: contour.py From Computable with MIT License | 5 votes |
def locate_label(self, linecontour, labelwidth):
    """
    Find a good place to plot a label (relatively flat part of the contour).
    """
    nsize = len(linecontour)
    if labelwidth > 1:
        xsize = int(np.ceil(nsize / labelwidth))
    else:
        xsize = 1
    if xsize == 1:
        ysize = nsize
    else:
        ysize = int(labelwidth)

    XX = np.resize(linecontour[:, 0], (xsize, ysize))
    YY = np.resize(linecontour[:, 1], (xsize, ysize))
    # I might have fouled up the following:
    yfirst = YY[:, 0].reshape(xsize, 1)
    ylast = YY[:, -1].reshape(xsize, 1)
    xfirst = XX[:, 0].reshape(xsize, 1)
    xlast = XX[:, -1].reshape(xsize, 1)
    s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
    L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
    dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
    x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
    # print 'ind, x, y', ind, x, y

    # There must be a more efficient way...
    lc = [tuple(l) for l in linecontour]
    dind = lc.index((x, y))
    # print 'dind', dind
    # dind = list(linecontour).index((x,y))

    return x, y, dind
Example #23
Source File: mrecords.py From Computable with MIT License | 5 votes |
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
            formats=None, names=None, titles=None,
            byteorder=None, aligned=False,
            mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
            copy=False,
            **options):
    #
    self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                            strides=strides, formats=formats, names=names,
                            titles=titles, byteorder=byteorder,
                            aligned=aligned,)
    #
    mdtype = ma.make_mask_descr(self.dtype)
    if mask is nomask or not np.size(mask):
        if not keep_mask:
            self._mask = tuple([False] * len(mdtype))
    else:
        mask = np.array(mask, copy=copy)
        if mask.shape != self.shape:
            (nd, nm) = (self.size, mask.size)
            if nm == 1:
                mask = np.resize(mask, self.shape)
            elif nm == nd:
                mask = np.reshape(mask, self.shape)
            else:
                msg = "Mask and data not compatible: data size is %i, " + \
                      "mask size is %i."
                raise MAError(msg % (nd, nm))
            copy = True
        if not keep_mask:
            self.__setmask__(mask)
            self._sharedmask = True
        else:
            if mask.dtype == mdtype:
                _mask = mask
            else:
                _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                 dtype=mdtype)
            self._mask = _mask
    return self
Example #24
Source File: _numdiff.py From ip-nonlinear-solver with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _prepare_bounds(bounds, x0):
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)

    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)

    return lb, ub
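Here np.resize is used to broadcast scalar bounds to the shape of x0 so that later element-wise comparisons work. A small hypothetical usage sketch of the helper above:

import numpy as np

x0 = np.zeros(3)
lb, ub = _prepare_bounds((-1.0, 1.0), x0)   # scalar bounds expanded per variable
print(lb)   # [-1. -1. -1.]
print(ub)   # [1. 1. 1.]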
Example #25
Source File: quaternion_time_series.py From quaternion with MIT License | 5 votes |
def append(self, row):
    self.n += 1
    if self.n > self._a.shape[0]:
        self._a = np.resize(self._a, (2 * self._a.shape[0],) + self._a.shape[1:])
    self._a[self.n - 1, ...] = row
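A brief note on the pattern above: the backing array is grown by doubling with np.resize whenever it fills up, and the repeated values placed in the new tail are just placeholders that later appends overwrite. A minimal self-contained sketch of the same growth strategy, with hypothetical names:

import numpy as np

class GrowableRows:
    # Hypothetical container illustrating amortized doubling growth via np.resize.
    def __init__(self, row_shape, capacity=4):
        self.n = 0
        self._a = np.zeros((capacity,) + row_shape)

    def append(self, row):
        self.n += 1
        if self.n > self._a.shape[0]:
            # Double the capacity; the duplicated tail rows are overwritten later.
            self._a = np.resize(self._a, (2 * self._a.shape[0],) + self._a.shape[1:])
        self._a[self.n - 1, ...] = row

rows = GrowableRows((3,))
for i in range(10):
    rows.append(np.full(3, float(i)))
print(rows._a[:rows.n].shape)   # (10, 3)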
Example #26
Source File: regression_generator.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _prepare_for_use(self):
    self._random_state = check_random_state(self.random_state)
    self.X, self.y = make_regression(n_samples=self.n_samples,
                                     n_features=self.n_features,
                                     n_informative=self.n_informative,
                                     n_targets=self.n_targets,
                                     random_state=self._random_state)
    self.y = np.resize(self.y, (self.y.size, self.n_targets))

    self.target_names = ["target_" + str(i) for i in range(self.n_targets)]
    self.feature_names = ["att_num_" + str(i) for i in range(self.n_num_features)]
    self.target_values = [float] * self.n_targets
Example #27
Source File: test_numeric.py From vnpy_crypto with MIT License | 5 votes |
def test_zeroresize(self):
    A = np.array([[1, 2], [3, 4]])
    Ar = np.resize(A, (0,))
    assert_array_equal(Ar, np.array([]))
    assert_equal(A.dtype, Ar.dtype)

    Ar = np.resize(A, (0, 2))
    assert_equal(Ar.shape, (0, 2))

    Ar = np.resize(A, (2, 0))
    assert_equal(Ar.shape, (2, 0))
Example #28
Source File: test_numeric.py From vnpy_crypto with MIT License | 5 votes |
def test_copies(self):
    A = np.array([[1, 2], [3, 4]])
    Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
    assert_equal(np.resize(A, (2, 4)), Ar1)

    Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    assert_equal(np.resize(A, (4, 2)), Ar2)

    Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
    assert_equal(np.resize(A, (4, 3)), Ar3)
Example #29
Source File: mrecords.py From vnpy_crypto with MIT License | 5 votes |
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
            formats=None, names=None, titles=None,
            byteorder=None, aligned=False,
            mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
            copy=False,
            **options):

    self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                            strides=strides, formats=formats, names=names,
                            titles=titles, byteorder=byteorder,
                            aligned=aligned,)

    mdtype = ma.make_mask_descr(self.dtype)
    if mask is nomask or not np.size(mask):
        if not keep_mask:
            self._mask = tuple([False] * len(mdtype))
    else:
        mask = np.array(mask, copy=copy)
        if mask.shape != self.shape:
            (nd, nm) = (self.size, mask.size)
            if nm == 1:
                mask = np.resize(mask, self.shape)
            elif nm == nd:
                mask = np.reshape(mask, self.shape)
            else:
                msg = "Mask and data not compatible: data size is %i, " + \
                      "mask size is %i."
                raise MAError(msg % (nd, nm))
            copy = True
        if not keep_mask:
            self.__setmask__(mask)
            self._sharedmask = True
        else:
            if mask.dtype == mdtype:
                _mask = mask
            else:
                _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                 dtype=mdtype)
            self._mask = _mask
    return self
Example #30
Source File: show_progress.py From stn-ocr with GNU General Public License v3.0 | 5 votes |
def handle(self):
    data = self.rfile.read()
    data = json.loads(data.decode('utf-8'))

    width = data['width']
    height = data['height']

    data = np.fromstring(base64.b64decode(data['image']), dtype=np.uint8)
    data = np.resize(data, (height, width, 3))
    image = Image.fromarray(data, mode='RGB')

    self.window.image = image