Python scipy.ndimage.generic_filter() Examples
The following are 13 code examples of scipy.ndimage.generic_filter(), drawn from open-source projects; the source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the scipy.ndimage module.
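Before the project examples, here is a minimal, self-contained sketch of the call pattern they all share: generic_filter slides a window over the input array and passes the window's contents to a Python callable as a flat 1-D array. The input array, window size, and the range statistic below are illustrative choices for this sketch, not taken from any of the projects.

import numpy as np
from scipy import ndimage

def window_range(values):
    # `values` holds the flattened contents of the current window
    return values.max() - values.min()

image = np.arange(25, dtype=float).reshape(5, 5)

# 3x3 moving-window range; mode='reflect' controls how the border is padded
result = ndimage.generic_filter(image, window_range, size=3, mode='reflect')
print(result.shape)  # (5, 5) -- same shape as the input

The result always has the same shape as the input; only the per-window statistic changes from example to example below.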
Example #1
Source File: test_ndimage.py (from Computable, MIT License)

def test_generic_filter01(self):
    filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
    footprint = numpy.array([[1, 0], [0, 1]])
    cf = numpy.array([1., 4.])

    def _filter_func(buffer, weights, total=1.0):
        weights = cf / total
        return (buffer * weights).sum()

    for type in self.types:
        a = numpy.arange(12, dtype=type)
        a.shape = (3, 4)
        r1 = ndimage.correlate(a, filter_ * footprint)
        if type in self.float_types:
            r1 /= 5
        else:
            r1 //= 5
        r2 = ndimage.generic_filter(a, _filter_func, footprint=footprint,
                                    extra_arguments=(cf,),
                                    extra_keywords={'total': cf.sum()})
        assert_array_almost_equal(r1, r2)
Example #2
Source File: test_filters.py (from Computable, MIT License)

def test_valid_origins():
    """Regression test for #1311."""
    func = lambda x: np.mean(x)
    data = np.array([1, 2, 3, 4, 5], dtype=np.float64)
    assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
                  origin=2)
    func2 = lambda x, y: np.mean(x + y)
    assert_raises(ValueError, sndi.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
                  origin=2)

    for filter in [sndi.uniform_filter, sndi.minimum_filter,
                   sndi.maximum_filter, sndi.maximum_filter1d,
                   sndi.median_filter, sndi.minimum_filter1d]:
        # This should work, since for size == 3, the valid range for origin is
        # -1 to 1.
        list(filter(data, 3, origin=-1))
        list(filter(data, 3, origin=1))

        # Just check this raises an error instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filter, data, 3, origin=2)
Example #3
Source File: test_ndimage.py (from GraphicDesignPatternByPython, MIT License)

def test_generic_filter01(self):
    filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
    footprint = numpy.array([[1, 0], [0, 1]])
    cf = numpy.array([1., 4.])

    def _filter_func(buffer, weights, total=1.0):
        weights = cf / total
        return (buffer * weights).sum()

    for type_ in self.types:
        a = numpy.arange(12, dtype=type_)
        a.shape = (3, 4)
        r1 = ndimage.correlate(a, filter_ * footprint)
        if type_ in self.float_types:
            r1 /= 5
        else:
            r1 //= 5
        r2 = ndimage.generic_filter(
            a, _filter_func, footprint=footprint,
            extra_arguments=(cf,), extra_keywords={'total': cf.sum()})
        assert_array_almost_equal(r1, r2)
Example #4
Source File: test_filters.py (from GraphicDesignPatternByPython, MIT License)

def test_valid_origins():
    """Regression test for #1311."""
    func = lambda x: np.mean(x)
    data = np.array([1, 2, 3, 4, 5], dtype=np.float64)
    assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
                  origin=2)
    func2 = lambda x, y: np.mean(x + y)
    assert_raises(ValueError, sndi.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
                  origin=2)

    for filter in [sndi.uniform_filter, sndi.minimum_filter,
                   sndi.maximum_filter, sndi.maximum_filter1d,
                   sndi.median_filter, sndi.minimum_filter1d]:
        # This should work, since for size == 3, the valid range for origin is
        # -1 to 1.
        list(filter(data, 3, origin=-1))
        list(filter(data, 3, origin=1))

        # Just check this raises an error instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filter, data, 3, origin=2)
Example #5
Source File: test_c_api.py (from GraphicDesignPatternByPython, MIT License)

def test_generic_filter():
    def filter2d(footprint_elements, weights):
        return (weights * footprint_elements).sum()

    def check(j):
        func = FILTER2D_FUNCTIONS[j]
        im = np.ones((20, 20))
        im[:10, :10] = 0
        footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        footprint_size = np.count_nonzero(footprint)
        weights = np.ones(footprint_size) / footprint_size

        res = ndimage.generic_filter(im, func(weights),
                                     footprint=footprint)
        std = ndimage.generic_filter(im, filter2d, footprint=footprint,
                                     extra_arguments=(weights,))
        assert_allclose(res, std, err_msg="#{} failed".format(j))

    for j, func in enumerate(FILTER2D_FUNCTIONS):
        check(j)
Example #6
Source File: value-iteration.py (from ML-Recipes, BSD 2-Clause "Simplified" License)

def solve(Z, start, goal):
    Z = 1 - Z
    G = np.zeros(Z.shape)
    G[start] = 1

    # We iterate until value at exit is > 0. This requires the maze
    # to have a solution or it will be stuck in the loop.
    def diffuse(Z, gamma=0.99):
        return max(gamma * Z[0], gamma * Z[1], Z[2],
                   gamma * Z[3], gamma * Z[4])

    G_gamma = np.empty_like(G)
    while G[goal] == 0.0:
        G = Z * generic_filter(G, diffuse, footprint=[[0, 1, 0],
                                                      [1, 1, 1],
                                                      [0, 1, 0]])

    # Descend the gradient to find the shortest path from entrance to exit
    y, x = goal
    dirs = (0, -1), (0, +1), (-1, 0), (+1, 0)
    P = []
    while (x, y) != start:
        P.append((y, x))
        neighbours = [-1, -1, -1, -1]
        if x > 0:
            neighbours[0] = G[y, x-1]
        if x < G.shape[1]-1:
            neighbours[1] = G[y, x+1]
        if y > 0:
            neighbours[2] = G[y-1, x]
        if y < G.shape[0]-1:
            neighbours[3] = G[y+1, x]
        a = np.argmax(neighbours)
        x, y = x + dirs[a][1], y + dirs[a][0]
    P.append((y, x))
    return P
Example #7
Source File: test_filters.py (from Computable, MIT License)

def test_ticket_701():
    # Test generic filter sizes
    arr = np.arange(4).reshape((2, 2))
    func = lambda x: np.min(x)
    res = sndi.generic_filter(arr, func, size=(1, 1))
    # The following raises an error unless ticket 701 is fixed
    res2 = sndi.generic_filter(arr, func, size=1)
    assert_equal(res, res2)
Example #8
Source File: test_filters.py (from GraphicDesignPatternByPython, MIT License)

def test_ticket_701():
    # Test generic filter sizes
    arr = np.arange(4).reshape((2, 2))
    func = lambda x: np.min(x)
    res = sndi.generic_filter(arr, func, size=(1, 1))
    # The following raises an error unless ticket 701 is fixed
    res2 = sndi.generic_filter(arr, func, size=1)
    assert_equal(res, res2)
Example #9
Source File: dnn.py (from ml-five, MIT License)

def _neighbor_count(self, board, who):
    footprint = np.array([[1, 1, 1],
                          [1, 0, 1],
                          [1, 1, 1]])
    return ndimage.generic_filter(board,
                                  lambda r: np.count_nonzero(r == who),
                                  footprint=footprint, mode='constant')
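The method above only uses board and who, so the same pattern works as a stand-alone call. A minimal sketch follows; the 4x4 board and the stone value 1 are made up for illustration and are not part of the ml-five project.

import numpy as np
from scipy import ndimage

board = np.array([[1, 0, 0, 2],
                  [0, 1, 2, 0],
                  [0, 0, 1, 0],
                  [2, 0, 0, 1]])

# For every cell, count how many of its 8 neighbours hold a 1.
# The zero at the footprint centre excludes the cell itself, and
# mode='constant' pads the border with cval (0.0 by default).
footprint = np.array([[1, 1, 1],
                      [1, 0, 1],
                      [1, 1, 1]])
counts = ndimage.generic_filter(board, lambda r: np.count_nonzero(r == 1),
                                footprint=footprint, mode='constant')
print(counts)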
Example #10
Source File: binarize_dibco.py (from binarization_2017, BSD 3-Clause "New" or "Revised" License)

def relative_darkness(im, window_size=5, threshold=10):
    if im.ndim == 3:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # find number of pixels at least $threshold less than the center value
    def below_thresh(vals):
        center_val = vals[vals.shape[0] // 2]  # integer index of the centre of the flattened window
        lower_thresh = center_val - threshold
        return (vals < lower_thresh).sum()

    # find number of pixels at least $threshold greater than the center value
    def above_thresh(vals):
        center_val = vals[vals.shape[0] // 2]
        above_thresh = center_val + threshold
        return (vals > above_thresh).sum()

    # apply the above function convolutionally
    lower = nd.generic_filter(im, below_thresh, size=window_size,
                              mode='reflect')
    upper = nd.generic_filter(im, above_thresh, size=window_size,
                              mode='reflect')

    # number of values within $threshold of the center value is the remainder
    # constraint: lower + middle + upper = window_size ** 2
    middle = np.empty_like(lower)
    middle.fill(window_size * window_size)
    middle = middle - (lower + upper)

    # scale to range [0-255]
    lower = lower * (255 / (window_size * window_size))
    middle = middle * (255 / (window_size * window_size))
    upper = upper * (255 / (window_size * window_size))

    return np.concatenate([lower[:, :, np.newaxis],
                           middle[:, :, np.newaxis],
                           upper[:, :, np.newaxis]], axis=2)
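A hedged usage sketch for the function above, on a synthetic grayscale patch. The array and parameter values are made up; it assumes relative_darkness and its module imports are in scope (the example refers to numpy as np and scipy.ndimage as nd), and with a 2-D input the cv2 conversion branch is never reached, so OpenCV is not required for this run.

import numpy as np
import scipy.ndimage as nd  # alias used by the example above

rng = np.random.default_rng(0)
im = rng.integers(0, 256, size=(32, 32), dtype=np.uint8)

# three-channel output: scaled counts of darker / similar / brighter
# pixels within each 5x5 window
rd = relative_darkness(im, window_size=5, threshold=10)
print(rd.shape)  # (32, 32, 3)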
Example #11
Source File: binarize_plm.py (from binarization_2017, BSD 3-Clause "New" or "Revised" License)

def relative_darkness(im, window_size=5, threshold=10):
    if im.ndim == 3:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # find number of pixels at least $threshold less than the center value
    def below_thresh(vals):
        center_val = vals[vals.shape[0] // 2]
        lower_thresh = center_val - threshold
        return (vals < lower_thresh).sum()

    # find number of pixels at least $threshold greater than the center value
    def above_thresh(vals):
        center_val = vals[vals.shape[0] // 2]
        above_thresh = center_val + threshold
        return (vals > above_thresh).sum()

    # apply the above function convolutionally
    lower = nd.generic_filter(im, below_thresh, size=window_size,
                              mode='reflect')
    upper = nd.generic_filter(im, above_thresh, size=window_size,
                              mode='reflect')

    # number of values within $threshold of the center value is the remainder
    # constraint: lower + middle + upper = window_size ** 2
    middle = np.empty_like(lower)
    middle.fill(window_size * window_size)
    middle = middle - (lower + upper)

    # scale to range [0-255]
    lower = lower * (255 / (window_size * window_size))
    middle = middle * (255 / (window_size * window_size))
    upper = upper * (255 / (window_size * window_size))

    return np.concatenate([lower[:, :, np.newaxis],
                           middle[:, :, np.newaxis],
                           upper[:, :, np.newaxis]], axis=2)
Example #12
Source File: sasma.py (from unmixing, MIT License)

def interpolate_endmember_spectra(em_map, window, cval=0, nodata=-9999):
    '''
    Spatially interpolates a single-band image using the given window; not
    intended for direct use, rather, it is a module-level function for use
    in a ProcessPoolExecutor's context as part of interpolate_endmember_map().
    Arguments:
        em_map  A single-band raster array with most, but not all, pixels
                masked; these are interpolated from the values of the
                unmasked pixels.
        window  A square array representing a moving window.
        cval    The constant value to use outside of the em_map array;
                should be set to zero for proper interpolation of
                endmember spectra.
    '''
    shp = em_map.shape
    w = np.max(window.shape)   # Assume square window; longest of any equal side
    window = np.ravel(window)  # For performance, use raveled arrays
    em_avg_map = generic_filter(
        # Fill NoData with zero --> no contribution to spatial sum
        np.where(em_map[0, ...] == nodata, cval, em_map[0, ...]),
        # Multiply em_map in window by weights, then divide by
        # the sum of weights in those non-zero areas
        lambda x: np.sum(np.multiply(x, window)) / np.sum(
            np.multiply(np.where(x == cval, 0, 1), window)),
        mode='constant', cval=cval, footprint=np.ones((w, w)))
    return em_avg_map.reshape((1, shp[1], shp[2]))
Example #13
Source File: postprocess_utils.py (from coded, MIT License)

def get_geom_feats(config, array, before_class, input):
    """ Add extra bands to the array for:
        # 1. Max magnitude in X pixel window
        # 2. Min magnitude in X pixel window
        # 3. Mean magnitude in X pixel window
        # 4+ TODO: area, shape, etc?
    """
    mag_band = config['general']['mag_band'] - 1
    mag = array[mag_band, :, :]
    forestlabel = int(config['classification']['forestlabel'])

    # create window
    before_class = before_class.astype(np.float64)
    before_class[before_class == forestlabel] = np.nan
    before_class[before_class == 0] = np.nan
    window = config['postprocessing']['deg_class']['window_size']

    max_mag = ndimage.generic_filter(before_class, np.nanmax, size=window)
    max_mag[np.isnan(max_mag)] = 0

    # min and mean window statistics, computed analogously to max_mag
    # (the excerpt referenced these below without defining them)
    min_mag = ndimage.generic_filter(before_class, np.nanmin, size=window)
    min_mag[np.isnan(min_mag)] = 0
    mean_mag = ndimage.generic_filter(before_class, np.nanmean, size=window)
    mean_mag[np.isnan(mean_mag)] = 0

    save_raster_simple(max_mag, input, 'test_classwindow.tif')

    dim1, dim2, dim3 = np.shape(array)
    newar = np.zeros((dim1 + 3, dim2, dim3))
    newar[0:dim1, :, :] = array
    newar[-3, :, :] = max_mag
    newar[-2, :, :] = min_mag
    newar[-1, :, :] = mean_mag

    newar[-3, :, :][newar[0, :, :] == 0] = 0
    newar[-2, :, :][newar[0, :, :] == 0] = 0
    newar[-1, :, :][newar[0, :, :] == 0] = 0

    return newar