Python scipy.ndimage.gaussian_filter1d() Examples
The following are 30 code examples of scipy.ndimage.gaussian_filter1d(). You can go to the original project or source file by following the attribution line above each example, or check out all available functions and classes of the module scipy.ndimage.
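Before the project examples, here is a minimal, self-contained sketch of the call itself: gaussian_filter1d() convolves a 1-D signal (or one axis of an N-D array) with a Gaussian kernel of standard deviation sigma. The signal below is synthetic and the parameter values are illustrative.

import numpy as np
from scipy.ndimage import gaussian_filter1d

rng = np.random.default_rng(0)
t = np.linspace(0, 4 * np.pi, 400)
noisy = np.sin(t) + rng.normal(scale=0.3, size=t.size)   # synthetic noisy signal

smoothed = gaussian_filter1d(noisy, sigma=5)             # smooth with sigma = 5 samples
deriv = gaussian_filter1d(noisy, sigma=5, order=1)       # order=1: smoothed first derivative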
Example #1
Source File: pycoQC_plot.py From pycoQC with GNU General Public License v3.0

def _compute_hist(data, x_scale="linear", smooth_sigma=2, nbins=200):
    # Count each category in log or linear space
    min = np.nanmin(data)
    max = np.nanmax(data)
    if x_scale == "log":
        count_y, bins = np.histogram(a=data, bins=np.logspace(np.log10(min), np.log10(max)+0.1, nbins))
    elif x_scale == "linear":
        count_y, bins = np.histogram(a=data, bins=np.linspace(min, max, nbins))

    # Drop the first bin edge so labels align with counts
    count_x = bins[1:]

    # Smooth results with a gaussian filter
    if smooth_sigma:
        count_y = gaussian_filter1d(count_y, sigma=smooth_sigma)

    # Convert to python lists
    count_x = [float(i) for i in count_x]
    count_y = [float(i) for i in count_y]
    return (count_x, count_y)
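A hypothetical call to _compute_hist, assuming numpy is imported as np and gaussian_filter1d is in scope as in pycoQC_plot.py; the read-length data here is synthetic:

import numpy as np
data = np.random.lognormal(mean=7, sigma=0.5, size=10_000)             # synthetic read lengths
count_x, count_y = _compute_hist(data, x_scale="log", smooth_sigma=2)
# count_x holds the right bin edges, count_y the smoothed counts, both as plain lists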
Example #2
Source File: motion.py From 2D-Motion-Retargeting with MIT License

def openpose2motion(json_dir, scale=1.0, smooth=True, max_frame=None):
    json_files = sorted(os.listdir(json_dir))
    length = max_frame if max_frame is not None else len(json_files) // 8 * 8
    json_files = json_files[:length]
    json_files = [os.path.join(json_dir, x) for x in json_files]

    motion = []
    for path in json_files:
        with open(path) as f:
            jointDict = json.load(f)
            joint = np.array(jointDict['people'][0]['pose_keypoints_2d']).reshape((-1, 3))[:15, :2]
            # fill zero (undetected) keypoints from the previous frame
            if len(motion) > 0:
                joint[np.where(joint == 0)] = motion[-1][np.where(joint == 0)]
            motion.append(joint)

    # back-fill any zeros that remain at the start of the sequence
    for i in range(len(motion) - 1, 0, -1):
        motion[i - 1][np.where(motion[i - 1] == 0)] = motion[i][np.where(motion[i - 1] == 0)]

    motion = np.stack(motion, axis=2)
    if smooth:
        motion = gaussian_filter1d(motion, sigma=2, axis=-1)
    motion = motion * scale
    return motion
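openpose2motion needs a directory of per-frame OpenPose JSON files, so it is hard to run standalone; a sketch of just its final smoothing step, on a synthetic (joints, xy, frames) array, looks like this:

import numpy as np
from scipy.ndimage import gaussian_filter1d

motion = np.random.rand(15, 2, 64)                      # 15 joints, x/y, 64 frames
smoothed = gaussian_filter1d(motion, sigma=2, axis=-1)  # smooth each trajectory over time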
Example #3
Source File: reggui.py From suite2p with GNU General Public License v3.0

def load_zstack(self):
    name = QtGui.QFileDialog.getOpenFileName(
        self, "Open zstack", filter="*.tif"
    )
    self.fname = name[0]
    try:
        self.zstack = imread(self.fname)
        self.zLy, self.zLx = self.zstack.shape[1:]
        self.Zedit.setValidator(QtGui.QIntValidator(0, self.zstack.shape[0]))
        self.zrange = [np.percentile(self.zstack, 1), np.percentile(self.zstack, 99)]
        self.computeZ.setEnabled(True)
        self.zloaded = True
        self.zbox.setEnabled(True)
        self.zbox.setChecked(True)
        if 'zcorr' in self.ops[0]:
            if self.zstack.shape[0] == self.ops[0]['zcorr'].shape[0]:
                zcorr = self.ops[0]['zcorr']
                self.zmax = np.argmax(gaussian_filter1d(zcorr.T.copy(), 2, axis=1), axis=1)
                self.plot_zcorr()
    except Exception as e:
        print('ERROR: %s' % e)
Example #4
Source File: resonator.py From qkit with GNU General Public License v2.0

def set_prefilter(self, gaussian=False, median=False, params=[]):
    self._do_prefilter_data = False
    if gaussian or median:
        self._do_prefilter_data = True
        #print gaussian, median
    if median:
        self._prefilter = median_filter
        #print("median_filter")
        if params:
            self._prefilter_params = params[0]
        else:
            self._prefilter_params = 6
    if gaussian:
        self._prefilter = gaussian_filter1d
        #print("gaussian_filter1d")
        if params:
            self._prefilter_params = params[0]
        else:
            self._prefilter_params = 6  # 0.4
Example #5
Source File: compute_irl_convergence_rates.py From rl_swiss with MIT License

def find_firsts(curve):
    curve = gaussian_filter1d(curve, STD)
    # fig, ax = plt.subplots(1)
    # ax.plot(curve)
    # ax.set_ylim([0, 8000])
    # plt.savefig('plots/junk_vis/test_smoothing.png', bbox_inches='tight', dpi=300)
    # plt.close()
    firsts = []
    for target in TARGETS:
        idxs = np.sort(np.asarray(curve > target).nonzero()[0])
        if idxs.size == 0:
            firsts.append(-1)
        else:
            firsts.append(idxs[0])
    return firsts
Example #6
Source File: qps_rings.py From qkit with GNU General Public License v2.0

def find_jumps2(self, ds, threshold=30000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    offset = ds[0]
    # first we remove a bit of noise
    #flt = gaussian_filter1d(ds,10)
    flt = median_filter(ds, size=10)
    #flt = ds
    # the sobel filter finds the "jumps"
    sb = sobel(flt)
    for i in sb:
        self.qps_jpn_hight.append(float(i))
    for i in flt:
        self.qps_jpn_spec.append(float(i))
    """
    for i in xrange(flt.shape[0]-1):
        if(abs(sb[i])>threshold):
            offset -= sb[i]
            self.qps_jpn_spec.append(float(flt[i]-offset))
        else:
            self.qps_jpn_spec.append(float(flt[i]-offset))
    """
    #for i in sb
Example #7
Source File: qps_rings.py From qkit with GNU General Public License v2.0

def find_jumps(self, ds, threshold=40000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    ds = gaussian_filter1d(ds, 2)
    offset = ds[0]
    jpnh = 0
    for i in range(ds.shape[0]-3):
        #i += 3
        #df = (((ds[i+1]+ds[i+2]+ds[i+3])/3.)-ds[i])
        #df = (ds[i] - ((ds[i-1]+ds[i-2]+ds[i-3])/3.))
        df = ((ds[i+1])-ds[i])
        if (abs(df) > threshold):
            self.qps_jpn_nr.append(1.)
            offset = offset - df
            jpnh = df
            #print df, offset
            self.qps_jpn_hight.append(abs(float(jpnh)))
            self.qps_jpn_spec.append(float(ds[i]+offset))
            jpnh = df
        else:
            self.qps_jpn_nr.append(0.)
            #self.qps_jpn_hight.append(float(jpnh))
            self.qps_jpn_spec.append(float(ds[i]+offset))
Example #8
Source File: region_growing.py From pyImSegm with BSD 3-Clause "New" or "Revised" License

def compute_segm_object_shape(img_object, ray_step=5, interp_order=3, smooth_coef=0,
                              shift_method='phase'):
    """ assuming single object in image and compute gravity centre and for
    this point compute Ray features and optionally:
    - interpolate missing values
    - smooth the Ray features

    :param ndarray img_object: binary segmentation of single object
    :param int ray_step: select the angular step for Ray features
    :param int interp_order: if None, no interpolation is performed
    :param float smooth_coef: smoothing the ray features
    :param str shift_method: use method for estimate shift maxima (phase or max)
    :return tuple(list(int), int):

    >>> img = np.zeros((100, 100))
    >>> img[20:70, 30:80] = 1
    >>> rays, shift = compute_segm_object_shape(img, ray_step=45)
    >>> rays  # doctest: +ELLIPSIS
    [36.7..., 26.0..., 35.3..., 25.0..., 35.3..., 25.0..., 35.3..., 26.0...]
    """
    centre = ndimage.measurements.center_of_mass(img_object)
    centre = [int(round(c)) for c in centre]
    ray_dist = compute_ray_features_segm_2d(img_object, centre, ray_step, 0, edge='down')
    if interp_order is not None and -1 in ray_dist:
        ray_dist = interpolate_ray_dist(ray_dist, interp_order)
    if smooth_coef > 0:
        ray_dist = ndimage.filters.gaussian_filter1d(ray_dist, smooth_coef)
    ray_dist, shift = shift_ray_features(ray_dist, shift_method)
    return ray_dist.tolist(), shift
Example #9
Source File: utils.py From suite2p with GNU General Public License v3.0

def resample_frames(y, x, xt):
    ''' resample y (defined at x) at times xt '''
    ts = x.size / xt.size
    y = gaussian_filter1d(y, np.ceil(ts/2), axis=0)
    f = interp1d(x, y, fill_value="extrapolate")
    yt = f(xt)
    return yt
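A hypothetical use of resample_frames, downsampling a 1000-sample trace to 250 samples; it assumes gaussian_filter1d and scipy.interpolate.interp1d are in scope, as in the source file. The smoothing sigma is ceil(ts/2), where ts is the downsampling ratio, acting as a simple anti-aliasing step.

import numpy as np
x = np.arange(1000.)             # original sample times
xt = np.linspace(0., 999., 250)  # target times (4x fewer samples)
y = np.random.rand(1000)
yt = resample_frames(y, x, xt)   # smoothed with sigma = ceil(4/2) = 2, then interpolated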
Example #10
Source File: reggui.py From suite2p with GNU General Public License v3.0

def compute_z(self):
    ops, zcorr = registration.compute_zpos(self.zstack, self.ops[0])
    self.zmax = np.argmax(gaussian_filter1d(zcorr.T.copy(), 2, axis=1), axis=1)
    np.save(self.filename, ops)
    self.plot_zcorr()
Example #11
Source File: data_glob_speed.py From ronin with GNU General Public License v3.0

def __init__(self, seq_type, root_dir, data_list, cache_path=None, step_size=10,
             window_size=200, random_shift=0, transform=None, **kwargs):
    super().__init__()
    self.feature_dim = seq_type.feature_dim
    self.target_dim = seq_type.target_dim
    self.aux_dim = seq_type.aux_dim
    self.window_size = window_size
    self.step_size = step_size
    self.random_shift = random_shift
    self.transform = transform

    self.data_path = [osp.join(root_dir, data) for data in data_list]
    self.index_map = []
    self.ts, self.orientations, self.gt_pos = [], [], []

    self.features, self.targets, aux = load_cached_sequences(
        seq_type, root_dir, data_list, cache_path, interval=1, **kwargs)

    # Optionally smooth the sequence
    feat_sigma = kwargs.get('feature_sigma', -1)
    targ_sigma = kwargs.get('target_sigma', -1)
    if feat_sigma > 0:
        self.features = [gaussian_filter1d(feat, sigma=feat_sigma, axis=0) for feat in self.features]
    if targ_sigma > 0:
        self.targets = [gaussian_filter1d(targ, sigma=targ_sigma, axis=0) for targ in self.targets]

    for i in range(len(data_list)):
        self.ts.append(aux[i][:, 0])
        self.orientations.append(aux[i][:, 1:5])
        self.gt_pos.append(aux[i][:, -3:])
        self.index_map += [[i, j] for j in range(window_size, self.targets[i].shape[0], step_size)]

    if kwargs.get('shuffle', True):
        random.shuffle(self.index_map)
Example #12
Source File: region_growing.py From pyImSegm with BSD 3-Clause "New" or "Revised" License

def transform_rays_model_sets_mean_cdf_mixture(list_rays, nb_components=5, slic_size=15):
    """ compute the mixture model and transform it into cumulative distribution

    :param list(list(int)) list_rays: list ray features (distances)
    :param int nb_components: number components in mixture model
    :param int slic_size: superpixel size
    :return tuple(any,list(list(int))): mixture model, list of stat/param of models

    >>> np.random.seed(0)
    >>> list_rays = [[9, 4, 9], [4, 9, 7], [9, 7, 11], [10, 8, 10],
    ...              [9, 11, 8], [4, 8, 5], [8, 10, 6], [9, 7, 11]]
    >>> mm, mean_cdf = transform_rays_model_sets_mean_cdf_mixture(list_rays, 2)
    >>> len(mean_cdf)
    2
    """
    rays = np.array(list_rays)
    # mm = mixture.GaussianMixture(n_components=nb_components,
    #                              covariance_type='diag')
    mm = mixture.BayesianGaussianMixture(n_components=nb_components,
                                         covariance_type='diag')
    mm.fit(rays)
    logging.debug('Mixture model found %i components with weights: %r',
                  len(mm.weights_), mm.weights_)

    list_mean_cdf = []
    # stds = mm.covariances_[:, np.eye(mm.means_.shape[1], dtype=bool)]
    # stds = mm.covariances_  # for covariance_type='diag'
    # diff_means = np.max(mm.means_, axis=0) - np.min(mm.means_, axis=0)
    for mean, covar in zip(mm.means_, mm.covariances_):
        std = np.sqrt(covar + 1) * 2 + slic_size
        mean = ndimage.gaussian_filter1d(mean, 1)
        std = ndimage.gaussian_filter1d(std, 1)
        max_dist = np.max(mean + 2 * std)
        cdist = compute_cumulative_distrib(np.array([mean]), np.array([std]),
                                           np.array([1]), max_dist)
        list_mean_cdf.append((mean.tolist(), cdist))

    return mm, list_mean_cdf
Example #13
Source File: region_growing.py From pyImSegm with BSD 3-Clause "New" or "Revised" License

def transform_rays_model_sets_mean_cdf_kmeans(list_rays, nb_components=5):
    """ compute the mixture model and transform it into cumulative distribution

    :param list(list(int)) list_rays: list ray features (distances)
    :param int nb_components: number components in mixture model
    :return tuple(any,list(list(int))): mixture model, list of stat/param of models

    >>> np.random.seed(0)
    >>> list_rays = [[9, 4, 9], [4, 9, 7], [9, 7, 11], [10, 8, 10],
    ...              [9, 11, 8], [4, 8, 5], [8, 10, 6], [9, 7, 11]]
    >>> mm, mean_cdf = transform_rays_model_sets_mean_cdf_kmeans(list_rays, 2)
    >>> len(mean_cdf)
    2
    """
    rays = np.array(list_rays)
    kmeans = cluster.KMeans(nb_components)
    kmeans.fit(rays)

    list_mean_cdf = []
    means = kmeans.cluster_centers_
    for lb, mean in enumerate(means):
        std = np.std(np.asarray(list_rays)[kmeans.labels_ == lb], axis=0)
        mean = ndimage.gaussian_filter1d(mean, 1)
        std = ndimage.gaussian_filter1d(std, 1)
        std = (std + 1) * 5.
        max_dist = np.max(mean + 2 * std)
        cdist = compute_cumulative_distrib(np.array([mean]), np.array([std]),
                                           np.array([1]), max_dist)
        list_mean_cdf.append((mean.tolist(), cdist))

    return kmeans, list_mean_cdf
Example #14
Source File: region_growing.py From pyImSegm with BSD 3-Clause "New" or "Revised" License

def transform_rays_model_cdf_spectral(list_rays, nb_components=5):
    """ compute the mixture model and transform it into cumulative distribution

    :param list(list(int)) list_rays: list ray features (distances)
    :param int nb_components: number components in mixture model
    :return tuple(any,list(list(int))): mixture model, list of stat/param of models

    >>> np.random.seed(0)
    >>> list_rays = [[9, 4, 9], [4, 9, 7], [9, 7, 11], [10, 8, 10],
    ...              [9, 11, 8], [4, 8, 5], [8, 10, 6], [9, 7, 11]]
    >>> mm, cdist = transform_rays_model_cdf_spectral(list_rays)
    >>> np.round(cdist, 1).tolist()  # doctest: +NORMALIZE_WHITESPACE
    [[1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.8, 0.6, 0.5, 0.2, 0.0],
     [1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.9, 0.7, 0.5, 0.2, 0.0],
     [1.0, 1.0, 1.0, 1.0, 1.0, 0.9, 0.8, 0.7, 0.5, 0.3, 0.0]]
    """
    rays = np.array(list_rays)
    sc = cluster.SpectralClustering(nb_components)
    sc.fit(rays)
    logging.debug('SpectralClustering found %i components with counts: %r',
                  len(np.unique(sc.labels_)), np.bincount(sc.labels_))

    labels = sc.labels_
    means = np.zeros((len(np.unique(labels)), rays.shape[1]))
    stds = np.zeros((len(means), rays.shape[1]))
    for i, lb in enumerate(np.unique(labels)):
        means[i, :] = np.mean(np.asarray(list_rays)[labels == lb], axis=0)
        means[i, :] = ndimage.filters.gaussian_filter1d(means[i, :], 1)
        stds[i, :] = np.std(np.asarray(list_rays)[labels == lb], axis=0)
    stds += 1
    weights = np.bincount(sc.labels_) / float(len(sc.labels_))

    # compute the fairest mean + sigma over all components and ray angles
    max_dist = np.max([[m[i] + c[i] for i in range(len(m))]
                       for m, c in zip(means, stds)])
    cdist = compute_cumulative_distrib(means, stds, weights, max_dist)
    return sc, cdist.tolist()
Example #15
Source File: random.py From spm1d with GNU General Public License v3.0

def _smooth(self, y):
    return self.SCALE * gaussian_filter1d(y, self.SD, axis=1, mode='wrap')
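mode='wrap' treats each row as periodic, which suits circularly defined 1-D data. A standalone sketch of the same call on a hypothetical (n_curves, n_nodes) array:

import numpy as np
from scipy.ndimage import gaussian_filter1d

y = np.random.randn(8, 101)                          # 8 random curves, 101 nodes each
ys = gaussian_filter1d(y, 10, axis=1, mode='wrap')   # periodic smoothing along each row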
Example #16
Source File: plot.py From RecNN with Apache License 2.0

def smooth_gauss(arr, var):
    # `var` is passed through as the kernel's standard deviation (sigma)
    return ndimage.gaussian_filter1d(arr, var)
Example #17
Source File: beamformers_electrodes_tweak.py From mmvt with GNU General Public License v3.0

def normalize_meg_data(meg_data, elec_data, from_t, to_t, sigma=0, norm_max=True):
    if sigma != 0:
        meg_data = gaussian_filter1d(meg_data, sigma)
    meg_data = meg_data[from_t:to_t]
    if norm_max:
        meg_data *= 1/max(meg_data)
    if elec_data is not None:
        meg_data -= meg_data[0] - elec_data[0]
    return meg_data
Example #18
Source File: beamformers_electrodes_tweak.py From mmvt with GNU General Public License v3.0

def normalize_elec_data(elec_data, from_t, to_t):
    elec_data = elec_data[from_t:to_t]
    elec_data = elec_data - min(elec_data)
    elec_data *= 1/max(elec_data)
    return elec_data

# def smooth_meg_data(meg_data):
#     meg_data_all = {}
#     for sigma in [8, 10, 12]:
#         meg_data_all[sigma] = gaussian_filter1d(meg_data, sigma)
#     return meg_data_all

# def check_electrodes():
#     meg_data_all, elec_data_all = {}, {}
#     electrodes = ['LAT1', 'LAT2', 'LAT3', 'LAT4']
#     vars = read_vars(events_id, None)
#     for cond, forward, evoked, epochs, data_cov, noise_cov, data_csd, noise_csd in vars:
#         for electrode in electrodes:
#             calc_electrode_fwd(MRI_SUBJECT, electrode, events_id, bipolar, overwrite_fwd=False)
#             forward = mne.read_forward_solution(get_cond_fname(FWD_X, cond, region=electrode))  # , surf_ori=True
#             elec_data = load_electrode_msit_data(bipolar, electrode, BLENDER_SUB_FOL, positive=True, normalize_data=True)
#             meg_data = call_dics(forward, evoked, bipolar, noise_csd, data_csd, cond)
#             elec_data_norm, meg_data_norm = normalize_data(elec_data[cond], meg_data, from_t, to_t)
#             meg_data_norm = gaussian_filter1d(meg_data_norm, 10)
#             meg_data_all[electrode] = meg_data_norm
#             elec_data_all[electrode] = elec_data_norm
#     plot_activation_options(meg_data_all, elec_data_all, electrodes, 500, elec_opts=True)
Example #19
Source File: beamformers_electrodes_tweak.py From mmvt with GNU General Public License v3.0

def diff_rms(y, meg):
    # diffs_sum = sum(abs(np.diff(y) - np.diff(meg)))
    # diffs_sum = sum(abs(utils.diff_4pc(y) - utils.diff_4pc(meg)))
    # y = gaussian_filter1d(y, 3)
    diffs_sum = sum(abs(np.gradient(y) - np.gradient(meg)))
    rms = np.sum((y-meg)**2)
    max_abs = max(abs(y-meg))
    if max_abs > 0.3:
        max_abs = np.inf
    if rms * 1/utils.max_min_diff(y) > 10:
        rms = np.inf
    return (diffs_sum + rms + max_abs) * 1/utils.max_min_diff(y)
Example #20
Source File: data_glob_heading.py From ronin with GNU General Public License v3.0

def __init__(self, seq_type, root_dir, data_list, cache_path=None, step_size=10,
             window_size=1000, random_shift=0, transform=None, **kwargs):
    super(HeadingDataset, self).__init__()
    self.seq_type = seq_type
    self.feature_dim = seq_type.feature_dim
    self.target_dim = seq_type.target_dim
    self.aux_dim = seq_type.aux_dim
    self.window_size = window_size
    self.step_size = step_size
    self.random_shift = random_shift
    self.transform = transform

    self.data_path = [osp.join(root_dir, data) for data in data_list]
    self.index_map = []

    self.features, self.targets, self.velocities = load_cached_sequences(
        seq_type, root_dir, data_list, cache_path, **kwargs)

    # Optionally smooth the sequence
    feat_sigma = kwargs.get('feature_sigma', -1)
    targ_sigma = kwargs.get('target_sigma', -1)
    if feat_sigma > 0:
        self.features = [gaussian_filter1d(feat, sigma=feat_sigma, axis=0) for feat in self.features]
    if targ_sigma > 0:
        self.targets = [gaussian_filter1d(targ, sigma=targ_sigma, axis=0) for targ in self.targets]

    max_norm = kwargs.get('max_velocity_norm', 3.0)
    for i in range(len(data_list)):
        self.features[i] = self.features[i][:-1]
        self.targets[i] = self.targets[i][:-1]
        self.velocities[i] = self.velocities[i]
        velocity = np.linalg.norm(self.velocities[i], axis=1)
        # Remove outlier ground truth data
        bad_data = velocity > max_norm
        for j in range(window_size + random_shift, self.targets[i].shape[0], step_size):
            if not bad_data[j - window_size - random_shift:j + random_shift].any():
                self.index_map.append([i, j])

    if kwargs.get('shuffle', True):
        random.shuffle(self.index_map)
Example #21
Source File: ephysqc.py From ibllib with MIT License

def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
    """ Calculate approximate fraction of spikes missing from a distribution of amplitudes

    Assumes the amplitude histogram is symmetric (not valid in the presence of drift)

    Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705

    Input:
    ------
    amplitudes : numpy.ndarray
        Array of amplitudes (don't need to be in physical units)

    Output:
    -------
    fraction_missing : float
        Fraction of missing spikes (0-0.5)
        If more than 50% of spikes are missing, an accurate estimate isn't possible
    """
    h, b = np.histogram(amplitudes, num_histogram_bins, density=True)
    pdf = gaussian_filter1d(h, histogram_smoothing_value)
    support = b[:-1]
    peak_index = np.argmax(pdf)
    G = np.argmin(np.abs(pdf[peak_index:] - pdf[0])) + peak_index
    bin_size = np.mean(np.diff(support))
    fraction_missing = np.sum(pdf[G:]) * bin_size
    fraction_missing = np.min([fraction_missing, 0.5])
    return fraction_missing
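A quick sanity check on synthetic data, assuming numpy as np and gaussian_filter1d are in scope as in ephysqc.py: a symmetric Gaussian amplitude distribution should yield a fraction near zero, while cutting off the low tail should raise it.

rng = np.random.default_rng(42)
amps = rng.normal(loc=100, scale=15, size=50_000)   # synthetic spike amplitudes
print(amplitude_cutoff(amps))                       # close to 0: few spikes missing
print(amplitude_cutoff(amps[amps > 90]))            # noticeably larger: low tail truncated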
Example #22
Source File: test_filters.py From Computable with MIT License

def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4
Example #23
Source File: qps_rings.py From qkit with GNU General Public License v2.0

def split_traces(self, ds, threshold=30000):
    self._prepare_find_jumps()
    ds = self._hf[ds]
    # first we remove a bit of noise, size is the number of averages
    #flt = gaussian_filter1d(ds,10)
    flt = median_filter(ds, size=3)
    #flt = ds
    # the sobel filter finds the "jumps"
    sb = sobel(flt)
    for i in sb:
        self.qps_jpn_hight.append(float(i))
    #for i in flt: self.qps_jpn_spec.append(float(i))
    offset = ds[0]
    tr_num = 0
    tr_name = "qps_tr_" + str(tr_num)
    tr_obj = self._hf.add_value_vector(tr_name, folder='analysis', x=self._x_co, unit='Hz')
    keepout = 4
    for i, tr in enumerate(flt):
        keepout += 1
        if abs(sb[i]) > threshold and keepout > 3:
            keepout = 0
            # new trace
            tr_num += 1
            tr_name = "qps_tr_" + str(tr_num)
            tr_obj = self._hf.add_value_vector(tr_name, folder='analysis', x=self._x_co, unit='Hz')
            print(tr, i)
            #tr_obj.append(float(tr))
        else:
            if keepout > 2:
                tr_obj.append(float(tr - offset))
Example #24
Source File: test_filters.py From GraphicDesignPatternByPython with MIT License

def test_gaussian_truncate():
    # Test that Gaussian filters can be truncated at different widths.
    # These tests only check that the result has the expected number
    # of nonzero elements.
    arr = np.zeros((100, 100), float)
    arr[50, 50] = 1
    num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
    assert_equal(num_nonzeros_2, 21**2)
    num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
    assert_equal(num_nonzeros_5, 51**2)

    # Test truncate when sigma is a sequence.
    f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
    fpos = f > 0
    n0 = fpos.any(axis=0).sum()
    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
    assert_equal(n0, 19)
    n1 = fpos.any(axis=1).sum()
    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
    assert_equal(n1, 5)

    # Test gaussian_filter1d.
    x = np.zeros(51)
    x[25] = 1
    f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
    n = (f > 0).sum()
    assert_equal(n, 15)

    # Test gaussian_laplace.
    y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
    nonzero_indices = np.where(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)

    # Test gaussian_gradient_magnitude.
    y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
    nonzero_indices = np.where(y != 0)[0]
    n = nonzero_indices.ptp() + 1
    assert_equal(n, 15)
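The counts asserted above follow from how scipy.ndimage sizes the kernel: the window radius is int(truncate * sigma + 0.5), so the kernel has 2 * radius + 1 taps per axis. A small sketch of that arithmetic (the helper name is ours):

def kernel_width(sigma, truncate=4.0):
    # radius of the Gaussian window used by scipy.ndimage
    radius = int(truncate * sigma + 0.5)
    return 2 * radius + 1

assert kernel_width(5, truncate=2) == 21      # 21 taps per axis, hence 21**2 nonzeros in 2-D
assert kernel_width(2.5, truncate=3.5) == 19  # n0 above
assert kernel_width(2, truncate=3.5) == 15    # the gaussian_filter1d case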
Example #25
Source File: test_filters.py From GraphicDesignPatternByPython with MIT License

def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying the filters with
    # different modes sequentially
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])
    modes = ['reflect', 'wrap']

    expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
    expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
    assert_equal(expected, sndi.gaussian_filter(arr, 1, mode=modes))

    expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
    expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.uniform_filter(arr, 5, mode=modes))

    expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.maximum_filter(arr, size=5, mode=modes))

    expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
    expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
    assert_equal(expected, sndi.minimum_filter(arr, size=5, mode=modes))
Example #26
Source File: test_filters.py From GraphicDesignPatternByPython with MIT License

def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
    assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1)
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0))
    assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3))
    assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1)
Example #27
Source File: evaluate.py From 2D-Motion-Retargeting with MIT License

def handle2x(config, args):
    w1 = h1 = w2 = h2 = 512

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    dataloader = get_dataloader('test', config)
    v1 = VIEW_ANGLES[args.view1] if args.view1 is not None else None
    v2 = VIEW_ANGLES[args.view2] if args.view2 is not None else None
    input1 = dataloader.dataset.preprocessing(args.path1, v1).unsqueeze(0)
    input2 = dataloader.dataset.preprocessing(args.path2, v2).unsqueeze(0)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)

    # transfer by network
    out12 = net.transfer(input1, input2)
    out21 = net.transfer(input2, input1)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)
    out12 = postprocess_motion2d(out12, mean_pose, std_pose, w2 // 2, h2 // 2)
    out21 = postprocess_motion2d(out21, mean_pose, std_pose, w1 // 2, h1 // 2)

    if not args.disable_smooth:
        out12 = gaussian_filter1d(out12, sigma=2, axis=-1)
        out21 = gaussian_filter1d(out21, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1, input2=input2, out12=out12, out21=out21)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1, h1, w1, os.path.join(save_dir, 'input1.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input2, h2, w2, os.path.join(save_dir, 'input2.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out12, h2, w2, os.path.join(save_dir, 'out12.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out21, h1, w1, os.path.join(save_dir, 'out21.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            print("Done.")
Example #28
Source File: evaluate.py From 2D-Motion-Retargeting with MIT License

def handle3x(config, args):
    # resize input
    h1, w1, scale1 = pad_to_height(config.img_size[0], args.img1_height, args.img1_width)
    h2, w2, scale2 = pad_to_height(config.img_size[0], args.img2_height, args.img2_width)
    h3, w3, scale3 = pad_to_height(config.img_size[0], args.img3_height, args.img3_width)

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    input1 = openpose2motion(args.vid1_json_dir, scale=scale1, max_frame=args.max_length)
    input2 = openpose2motion(args.vid2_json_dir, scale=scale2, max_frame=args.max_length)
    input3 = openpose2motion(args.vid3_json_dir, scale=scale3, max_frame=args.max_length)
    input1 = preprocess_motion2d(input1, mean_pose, std_pose)
    input2 = preprocess_motion2d(input2, mean_pose, std_pose)
    input3 = preprocess_motion2d(input3, mean_pose, std_pose)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)
    input3 = input3.to(config.device)

    # transfer by network
    out = net.transfer_three(input1, input2, input3)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)
    input3 = postprocess_motion2d(input3, mean_pose, std_pose, w3 // 2, h3 // 2)
    out = postprocess_motion2d(out, mean_pose, std_pose, w2 // 2, h2 // 2)

    if not args.disable_smooth:
        out = gaussian_filter1d(out, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        color3 = hex2rgb(args.color3)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1, input2=input2, input3=input3, out=out)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1, h1, w1, os.path.join(save_dir, 'input1.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input2, h2, w2, os.path.join(save_dir, 'input2.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input3, h3, w3, os.path.join(save_dir, 'input3.mp4'), color3,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out, h2, w2, os.path.join(save_dir, 'out.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            print("Done.")
Example #29
Source File: predict.py From 2D-Motion-Retargeting with MIT License

def handle2x(config, args):
    # resize input
    h1, w1, scale1 = pad_to_height(config.img_size[0], args.img1_height, args.img1_width)
    h2, w2, scale2 = pad_to_height(config.img_size[0], args.img2_height, args.img2_width)

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    input1 = openpose2motion(args.vid1_json_dir, scale=scale1, max_frame=args.max_length)
    input2 = openpose2motion(args.vid2_json_dir, scale=scale2, max_frame=args.max_length)
    input1 = preprocess_motion2d(input1, mean_pose, std_pose)
    input2 = preprocess_motion2d(input2, mean_pose, std_pose)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)

    # transfer by network
    out12 = net.transfer(input1, input2)
    out21 = net.transfer(input2, input1)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)
    out12 = postprocess_motion2d(out12, mean_pose, std_pose, w2 // 2, h2 // 2)
    out21 = postprocess_motion2d(out21, mean_pose, std_pose, w1 // 2, h1 // 2)

    if not args.disable_smooth:
        out12 = gaussian_filter1d(out12, sigma=2, axis=-1)
        out21 = gaussian_filter1d(out21, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1, input2=input2, out12=out12, out21=out21)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1, h1, w1, os.path.join(save_dir, 'input1.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input2, h2, w2, os.path.join(save_dir, 'input2.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out12, h2, w2, os.path.join(save_dir, 'out12.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out21, h1, w1, os.path.join(save_dir, 'out21.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            print("Done.")
Example #30
Source File: predict.py From 2D-Motion-Retargeting with MIT License

def handle3x(config, args):
    # resize input
    h1, w1, scale1 = pad_to_height(config.img_size[0], args.img1_height, args.img1_width)
    h2, w2, scale2 = pad_to_height(config.img_size[0], args.img2_height, args.img2_width)
    h3, w3, scale3 = pad_to_height(config.img_size[0], args.img3_height, args.img3_width)

    # load trained model
    net = get_autoencoder(config)
    net.load_state_dict(torch.load(args.model_path))
    net.to(config.device)
    net.eval()

    # mean/std pose
    mean_pose, std_pose = get_meanpose(config)

    # get input
    input1 = openpose2motion(args.vid1_json_dir, scale=scale1, max_frame=args.max_length)
    input2 = openpose2motion(args.vid2_json_dir, scale=scale2, max_frame=args.max_length)
    input3 = openpose2motion(args.vid3_json_dir, scale=scale3, max_frame=args.max_length)
    input1 = preprocess_motion2d(input1, mean_pose, std_pose)
    input2 = preprocess_motion2d(input2, mean_pose, std_pose)
    input3 = preprocess_motion2d(input3, mean_pose, std_pose)
    input1 = input1.to(config.device)
    input2 = input2.to(config.device)
    input3 = input3.to(config.device)

    # transfer by network
    out = net.transfer_three(input1, input2, input3)

    # postprocessing the outputs
    input1 = postprocess_motion2d(input1, mean_pose, std_pose, w1 // 2, h1 // 2)
    input2 = postprocess_motion2d(input2, mean_pose, std_pose, w2 // 2, h2 // 2)
    input3 = postprocess_motion2d(input3, mean_pose, std_pose, w3 // 2, h3 // 2)
    out = postprocess_motion2d(out, mean_pose, std_pose, w2 // 2, h2 // 2)

    if not args.disable_smooth:
        out = gaussian_filter1d(out, sigma=2, axis=-1)

    if args.out_dir is not None:
        save_dir = args.out_dir
        ensure_dir(save_dir)
        color1 = hex2rgb(args.color1)
        color2 = hex2rgb(args.color2)
        color3 = hex2rgb(args.color3)
        np.savez(os.path.join(save_dir, 'results.npz'),
                 input1=input1, input2=input2, input3=input3, out=out)
        if args.render_video:
            print("Generating videos...")
            motion2video(input1, h1, w1, os.path.join(save_dir, 'input1.mp4'), color1,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input2, h2, w2, os.path.join(save_dir, 'input2.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(input3, h3, w3, os.path.join(save_dir, 'input3.mp4'), color3,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            motion2video(out, h2, w2, os.path.join(save_dir, 'out.mp4'), color2,
                         args.transparency, fps=args.fps, save_frame=args.save_frame)
            print("Done.")