Python numpy.interp() Examples
The following are 30 code examples of numpy.interp(), drawn from open-source projects; the originating source file, project, and license are listed above each example.
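As a quick reminder of the basic API before the project examples (a minimal sketch of my own, not taken from any project below): np.interp(x, xp, fp) evaluates the piecewise-linear interpolant through the points (xp, fp) at x, with optional left, right, and period keyword arguments.

import numpy as np

xp = [1.0, 2.0, 3.0]   # x-coordinates of the data points (must be increasing)
fp = [3.0, 2.0, 0.0]   # y-coordinates of the data points

print(np.interp(2.5, xp, fp))              # 1.0, midway between fp[1] and fp[2]
print(np.interp([0.0, 4.0], xp, fp))       # [3.0, 0.0], clamped to the end values by default
print(np.interp(3.5, xp, fp, right=-1.0))  # -1.0, explicit fill value right of the range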
Example #1
Source File: dynamic.py From StructEngPy with MIT License

def spectrum_analysis(model, n, spec):
    """Spectrum analysis.

    params:
        n: number of modes to use
        spec: a list of tuples (period, acceleration response)
    """
    freq, mode = eigen_mode(model, n)
    M_ = np.dot(mode.T, model.M)
    M_ = np.dot(M_, mode)
    K_ = np.dot(mode.T, model.K)
    K_ = np.dot(K_, mode)
    C_ = np.dot(mode.T, model.C)
    C_ = np.dot(C_, mode)
    d_ = []
    for (m_, k_, c_) in zip(M_.diag(), K_.diag(), C_.diag()):
        sdof = SDOFSystem(m_, k_)
        T = sdof.omega_d()
        d_.append(np.interp(T, spec[0], spec[1]*m_))
    d = np.dot(d_, mode)  # CQC
    return d
Example #2
Source File: track_lib.py From TNT with GNU General Public License v3.0

def interp_batch(total_batch_x):
    interp_batch_x = total_batch_x.copy()
    N_batch = total_batch_x.shape[0]
    for n in range(N_batch):
        temp_idx = np.where(total_batch_x[n,0,:,1]==1)[0]
        t1 = int(temp_idx[-1])
        temp_idx = np.where(total_batch_x[n,0,:,2]==1)[0]
        t2 = int(temp_idx[0])
        if t2-t1<=1:
            continue
        interp_t = np.array(range(t1+1,t2))
        for k in range(total_batch_x.shape[1]):
            #temp_std = np.std(total_batch_x[n,k,total_batch_x[n,k,:,0]!=0,0])
            temp_std1 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,1]!=0,0])
            temp_std2 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,2]!=0,0])
            x_p = [t1,t2]
            f_p = [total_batch_x[n,k,t1,0],total_batch_x[n,k,t2,0]]
            #interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)#+np.random.normal(0, temp_std, t2-t1-1)
            interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)+np.random.normal(0, (temp_std1+temp_std2)*0.5, t2-t1-1)
    return interp_batch_x
Example #3
Source File: test_function_base.py From lambda-packs with MIT License

def test_complex_interp(self):
    # test complex interpolation
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
    x0 = 0.3
    y0 = x0 + (1+x0)*1.0j
    assert_almost_equal(np.interp(x0, x, y), y0)

    # test complex left and right
    x0 = -1
    left = 2 + 3.0j
    assert_almost_equal(np.interp(x0, x, y, left=left), left)
    x0 = 2.0
    right = 2 + 3.0j
    assert_almost_equal(np.interp(x0, x, y, right=right), right)

    # test complex periodic
    x = [-180, -170, -185, 185, -10, -5, 0, 365]
    xp = [190, -190, 350, -350]
    fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
    y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j,
         3.+3j, 3.25+3.25j, 3.5+3.5j, 3.75+3.75j]
    assert_almost_equal(np.interp(x, xp, fp, period=360), y)
Example #4
Source File: qrnn.py From typhon with MIT License

def sample_posterior(self, x, n=1):
    r"""
    Generates :code:`n` samples from the estimated posterior
    distribution for the input vector :code:`x`. The sampling
    is performed by the inverse CDF method using the estimated
    CDF obtained from the :code:`cdf` member function.

    Arguments:

        x(np.array): Array of shape `(n, m)` containing `n` inputs for which
                     to predict the conditional quantiles.

        n(int): The number of samples to generate.

    Returns:

        Tuple (xs, fs) containing the :math:`x`-values in `xs` and
        corresponding values of the posterior CDF :math:`F(x)` in `fs`.
    """
    y_pred, qs = self.cdf(x)
    p = np.random.rand(n)
    y = np.interp(p, qs, y_pred)
    return y
Example #5
Source File: train_cnn_trajectory_2d.py From TNT with GNU General Public License v3.0

def interp_batch(total_batch_x):
    interp_batch_x = total_batch_x.copy()
    N_batch = total_batch_x.shape[0]
    for n in range(N_batch):
        temp_idx = np.where(total_batch_x[n,0,:,1]==1)[0]
        t1 = int(temp_idx[-1])
        temp_idx = np.where(total_batch_x[n,0,:,2]==1)[0]
        t2 = int(temp_idx[0])
        if t2-t1<=1:
            continue
        interp_t = np.array(range(t1+1,t2))
        for k in range(total_batch_x.shape[1]):
            #temp_std = np.std(total_batch_x[n,k,total_batch_x[n,k,:,0]!=0,0])
            temp_std1 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,1]!=0,0])
            temp_std2 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,2]!=0,0])
            x_p = [t1,t2]
            f_p = [total_batch_x[n,k,t1,0],total_batch_x[n,k,t2,0]]
            #*************************************
            #interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)+np.random.normal(0, temp_std, t2-t1-1)
            #*************************************
            interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)+np.random.normal(0, (temp_std1+temp_std2)*0.5, t2-t1-1)
    return interp_batch_x
Example #6
Source File: test_missing.py From recruit with Apache License 2.0

def test_interpolate_index_values(self):
    s = Series(np.nan, index=np.sort(np.random.rand(30)))
    s[::3] = np.random.randn(10)

    vals = s.index.values.astype(float)

    result = s.interpolate(method='index')

    expected = s.copy()
    bad = isna(expected.values)
    good = ~bad
    expected = Series(np.interp(vals[bad], vals[good], s.values[good]),
                      index=s.index[bad])

    assert_series_equal(result[bad], expected)

    # 'values' is synonymous with 'index' for the method kwarg
    other_result = s.interpolate(method='values')

    assert_series_equal(other_result, result)
    assert_series_equal(other_result[bad], expected)
Example #7
Source File: dataset.py From HorizonNet with MIT License

def cor_2_1d(cor, H, W):
    bon_ceil_x, bon_ceil_y = [], []
    bon_floor_x, bon_floor_y = [], []
    n_cor = len(cor)
    for i in range(n_cor // 2):
        xys = panostretch.pano_connect_points(cor[i*2],
                                              cor[(i*2+2) % n_cor],
                                              z=-50, w=W, h=H)
        bon_ceil_x.extend(xys[:, 0])
        bon_ceil_y.extend(xys[:, 1])
    for i in range(n_cor // 2):
        xys = panostretch.pano_connect_points(cor[i*2+1],
                                              cor[(i*2+3) % n_cor],
                                              z=50, w=W, h=H)
        bon_floor_x.extend(xys[:, 0])
        bon_floor_y.extend(xys[:, 1])
    bon_ceil_x, bon_ceil_y = sort_xy_filter_unique(bon_ceil_x, bon_ceil_y, y_small_first=True)
    bon_floor_x, bon_floor_y = sort_xy_filter_unique(bon_floor_x, bon_floor_y, y_small_first=False)
    bon = np.zeros((2, W))
    bon[0] = np.interp(np.arange(W), bon_ceil_x, bon_ceil_y, period=W)
    bon[1] = np.interp(np.arange(W), bon_floor_x, bon_floor_y, period=W)
    bon = ((bon + 0.5) / H - 0.5) * np.pi
    return bon
Example #8
Source File: track_lib.py From TNT with GNU General Public License v3.0

def interp_batch(total_batch_x):
    interp_batch_x = total_batch_x.copy()
    N_batch = total_batch_x.shape[0]
    for n in range(N_batch):
        temp_idx = np.where(total_batch_x[n,0,:,1]==1)[0]
        t1 = int(temp_idx[-1])
        temp_idx = np.where(total_batch_x[n,0,:,2]==1)[0]
        t2 = int(temp_idx[0])
        if t2-t1<=1:
            continue
        interp_t = np.array(range(t1+1,t2))
        for k in range(total_batch_x.shape[1]):
            #temp_std = np.std(total_batch_x[n,k,total_batch_x[n,k,:,0]!=0,0])
            temp_std1 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,1]!=0,0])
            temp_std2 = np.std(total_batch_x[n,k,total_batch_x[n,0,:,2]!=0,0])
            x_p = [t1,t2]
            f_p = [total_batch_x[n,k,t1,0],total_batch_x[n,k,t2,0]]
            #interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)#+np.random.normal(0, temp_std, t2-t1-1)
            interp_batch_x[n,k,t1+1:t2,0] = np.interp(interp_t,x_p,f_p)+np.random.normal(0, (temp_std1+temp_std2)*0.5, t2-t1-1)
    return interp_batch_x
Example #9
Source File: test_function_base.py From recruit with Apache License 2.0

def test_zero_dimensional_interpolation_point(self):
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5)
    x0 = np.array(.3)
    assert_almost_equal(np.interp(x0, x, y), x0)

    xp = np.array([0, 2, 4])
    fp = np.array([1, -1, 1])

    actual = np.interp(np.array(1), xp, fp)
    assert_equal(actual, 0)
    assert_(isinstance(actual, np.float64))

    actual = np.interp(np.array(4.5), xp, fp, period=4)
    assert_equal(actual, 0.5)
    assert_(isinstance(actual, np.float64))
Example #10
Source File: imgutil.py From Depth-Map-Prediction with GNU General Public License v3.0

def colormap(x, m=None, M=None, center=0, colors=None):
    '''color a grayscale array (currently red/blue by sign)'''
    if center is None:
        center = 0
    if colors is None:
        colors = np.array(((0, 0.7, 1),
                           (0, 0, 0),
                           (1, 0, 0)), dtype=float)
    if x.shape[-1] == 1:
        x = x[..., 0]
    x = scale_values(x, min=m, max=M, center=center)
    y = np.empty(x.shape + (3,))
    for c in xrange(3):
        y[..., c] = np.interp(x, (0, 0.5, 1), colors[:, c])
    return y
Example #11
Source File: test_control_curves.py From pywr with GNU General Public License v3.0

def test_circular_control_curve_interpolated_json():
    # this is a little hack-y, as the parameters don't provide access to their
    # data once they've been initialised
    model = load_model("reservoir_with_circular_cc.json")
    reservoir1 = model.nodes["reservoir1"]
    model.setup()

    path = os.path.join(os.path.dirname(__file__), "models", "control_curve.csv")
    control_curve = pd.read_csv(path)["Control Curve"].values
    values = [-8, -6, -4]

    @assert_rec(model, reservoir1.cost)
    def expected_cost(timestep, si):
        # calculate expected cost manually and compare to parameter output
        volume_factor = reservoir1._current_pc[si.global_id]
        cc = control_curve[timestep.index]
        return np.interp(volume_factor, [0.0, cc, 1.0], values[::-1])

    model.run()
Example #12
Source File: test_control_curves.py From pywr with GNU General Public License v3.0

def test_control_curve_interpolated_json(use_parameters):
    # this is a little hack-y, as the parameters don't provide access to their
    # data once they've been initialised
    if use_parameters:
        model = load_model("reservoir_with_cc_param_values.json")
    else:
        model = load_model("reservoir_with_cc.json")
    reservoir1 = model.nodes["reservoir1"]
    model.setup()

    path = os.path.join(os.path.dirname(__file__), "models", "control_curve.csv")
    control_curve = pd.read_csv(path)["Control Curve"].values
    values = [-8, -6, -4]

    @assert_rec(model, reservoir1.cost)
    def expected_cost(timestep, si):
        # calculate expected cost manually and compare to parameter output
        volume_factor = reservoir1._current_pc[si.global_id]
        cc = control_curve[timestep.index]
        return np.interp(volume_factor, [0.0, cc, 1.0], values[::-1])

    model.run()
Example #13
Source File: helper.py From Deep_Learning_Weather_Forecasting with Apache License 2.0

def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices = index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    return np.isnan(y), lambda z: z.nonzero()[0]
Example #14
Source File: multi_problem_v2.py From tensor2tensor with Apache License 2.0

def linear_interpolation(x, xp, fp, **kwargs):
    """Multi-dimensional linear interpolation.

    Returns the multi-dimensional piecewise linear interpolant to a function
    with given discrete data points (xp, fp), evaluated at x.

    Note that *N and *M indicate zero or more dimensions.

    Args:
        x: An array of shape [*N], the x-coordinates of the interpolated values.
        xp: An np.array of shape [D], the x-coordinates of the data points,
            must be increasing.
        fp: An np.array of shape [D, *M], the y-coordinates of the data points.
        **kwargs: Keywords for np.interp.

    Returns:
        An array of shape [*N, *M], the interpolated values.
    """
    yp = fp.reshape([fp.shape[0], -1]).transpose()
    y = np.stack([np.interp(x, xp, zp, **kwargs) for zp in yp]).transpose()
    return y.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32)
Example #15
Source File: spectrum.py From StructEngPy with MIT License

def __init__(self, alpha_max, Tg, xi):
    gamma = 0.9+(0.05-xi)/(0.3+6*xi)
    eta1 = 0.02+(0.05-xi)/(4+32*xi)
    eta1 = eta1 if eta1>0 else 0
    eta2 = 1+(0.05-xi)/(0.08+1.6*xi)
    eta2 = eta2 if eta2>0.55 else 0.55
    T = np.linspace(0, 6, 601)
    alpha = []
    for t in T:
        if t<0.1:
            alpha.append(np.interp(t, [0, 0.1], [0.45*alpha_max, eta2*alpha_max]))
        elif t<Tg:
            alpha.append(eta2*alpha_max)
        elif t<5*Tg:
            alpha.append((Tg/t)**gamma*eta2*alpha_max)
        else:
            alpha.append((eta2*0.2**gamma-eta1*(t-5*Tg))*alpha_max)
    self.__spectrum = {'T': T, 'alpha': alpha}
Example #16
Source File: test_function_base.py From auto-alt-text-lambda-api with MIT License

def test_basic(self):
    x = np.linspace(0, 1, 5)
    y = np.linspace(0, 1, 5)
    x0 = np.linspace(0, 1, 50)
    assert_almost_equal(np.interp(x0, x, y), x0)
Example #17
Source File: test_function_base.py From auto-alt-text-lambda-api with MIT License

def test_exceptions(self):
    assert_raises(ValueError, interp, 0, [], [])
    assert_raises(ValueError, interp, 0, [0], [1, 2])
    assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
    assert_raises(ValueError, interp, 0, [], [], period=360)
    assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
Example #18
Source File: test_function_base.py From auto-alt-text-lambda-api with MIT License

def test_right_left_behavior(self):
    # Needs range of sizes to test different code paths.
    # size ==1 is special cased, 1 < size < 5 is linear search, and
    # size >= 5 goes through local search and possibly binary search.
    for size in range(1, 10):
        xp = np.arange(size, dtype=np.double)
        yp = np.ones(size, dtype=np.double)
        incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
        decpts = incpts[::-1]

        incres = interp(incpts, xp, yp)
        decres = interp(decpts, xp, yp)
        inctgt = np.array([1, 1, 1, 1], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, left=0)
        decres = interp(decpts, xp, yp, left=0)
        inctgt = np.array([0, 1, 1, 1], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, right=2)
        decres = interp(decpts, xp, yp, right=2)
        inctgt = np.array([1, 1, 1, 2], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)

        incres = interp(incpts, xp, yp, left=0, right=2)
        decres = interp(decpts, xp, yp, left=0, right=2)
        inctgt = np.array([0, 1, 1, 2], dtype=np.float)
        dectgt = inctgt[::-1]
        assert_equal(incres, inctgt)
        assert_equal(decres, dectgt)
Example #19
Source File: cmath.py From ehtplot with GNU General Public License v3.0

def interp(x, xp, yp):
    """Improve numpy's interp() function to allow decreasing `xp`"""
    if xp[0] < xp[-1]:
        return np.interp(x, xp, yp)
    else:
        return np.interp(x, np.flip(xp, 0), np.flip(yp, 0))
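For context (a minimal usage sketch of my own, not part of ehtplot): np.interp requires xp to be increasing and does not check or raise when it is not, so its output on decreasing xp is unreliable; the wrapper above flips both arrays first.

import numpy as np

xp_desc = np.array([3.0, 2.0, 1.0])    # decreasing abscissas
fp      = np.array([30.0, 20.0, 10.0])

# np.interp silently assumes xp is increasing, so this result is unreliable:
print(np.interp(1.5, xp_desc, fp))

# the interp() wrapper from Example #19 flips both arrays and returns 15.0:
print(interp(1.5, xp_desc, fp))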
Example #20
Source File: train_softmax.py From TNT with GNU General Public License v3.0

def find_threshold(var, percentile):
    hist, bin_edges = np.histogram(var, 100)
    cdf = np.float32(np.cumsum(hist)) / np.sum(hist)
    bin_centers = (bin_edges[:-1]+bin_edges[1:])/2
    #plt.plot(bin_centers, cdf)
    threshold = np.interp(percentile*0.01, cdf, bin_centers)
    return threshold
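A usage sketch (my own illustration, not from the TNT project): because the function interpolates the requested percentile against an empirical CDF built from a 100-bin histogram, its output should roughly agree with np.percentile on well-behaved data.

import numpy as np

var = np.random.randn(10000)      # illustrative data only
t = find_threshold(var, 90)       # find_threshold from Example #20
# Bin-level approximation of the 90th percentile, so the two values should be close:
print(t, np.percentile(var, 90))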
Example #21
Source File: common.py From OpenCV-Python-Tutorial with MIT License

def make_cmap(name, n=256):
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    channels = []
    eps = 1e-6
    for ch_name in ['blue', 'green', 'red']:
        ch_data = data[ch_name]
        xp, yp = [], []
        for x, y1, y2 in ch_data:
            xp += [x, x+eps]
            yp += [y1, y2]
        ch = np.interp(xs, xp, yp)
        channels.append(ch)
    return np.uint8(np.array(channels).T*255)
Example #22
Source File: utils.py From SfmLearner-Pytorch with MIT License

def high_res_colormap(low_res_cmap, resolution=1000, max_value=1):
    # Construct the list colormap, with interpolated values for higher resolution
    # For a linear segmented colormap, you can just specify the number of points in
    # cm.get_cmap(name, lutsize) with the parameter lutsize
    x = np.linspace(0, 1, low_res_cmap.N)
    low_res = low_res_cmap(x)
    new_x = np.linspace(0, max_value, resolution)
    high_res = np.stack([np.interp(new_x, x, low_res[:, i])
                         for i in range(low_res.shape[1])], axis=1)
    return ListedColormap(high_res)
Example #23
Source File: core.py From ffn with MIT License

def rescale(x, min=0., max=1., axis=0):
    """
    Rescale values to fit a certain range [min, max]
    """
    def innerfn(x, min, max):
        return np.interp(x, [np.min(x), np.max(x)], [min, max])

    if isinstance(x, pd.DataFrame):
        return x.apply(innerfn, axis=axis, args=(min, max,))
    else:
        return pd.Series(innerfn(x, min, max), index=x.index)
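A short usage sketch (my own, with made-up data, not from ffn): np.interp with a two-point xp of [min(x), max(x)] acts as a linear min-max rescaler.

import numpy as np
import pandas as pd

s = pd.Series([3.0, 7.0, 11.0], index=['a', 'b', 'c'])
print(rescale(s))            # maps min->0 and max->1: 0.0, 0.5, 1.0
print(rescale(s, -1., 1.))   # maps onto [-1, 1]: -1.0, 0.0, 1.0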
Example #24
Source File: PlotDataItem.py From tf-pose with Apache License 2.0

def _fourierTransform(self, x, y):
    ## Perform fourier transform. If x values are not sampled uniformly,
    ## then use np.interp to resample before taking fft.
    dx = np.diff(x)
    uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))
    if not uniform:
        x2 = np.linspace(x[0], x[-1], len(x))
        y = np.interp(x2, x, y)
        x = x2
    n = y.size
    f = np.fft.rfft(y) / n
    d = float(x[-1]-x[0]) / (len(x)-1)
    x = np.fft.rfftfreq(n, d)
    y = np.abs(f)
    return x, y
Example #25
Source File: hist_match.py From argus-tgs-salt with MIT License

def hist_match(source, hist):
    """
    Adjust the pixel values of a grayscale image such that its histogram
    matches a given one

    Arguments:
    -----------
        source: np.ndarray
            Image to transform; the histogram is computed over the flattened array
        hist: np.ndarray
            Template histogram

    Returns:
    -----------
        matched: np.ndarray
            The transformed output image
    """
    oldshape = source.shape
    source = source.ravel()

    # get the set of unique pixel values and their corresponding indices and
    # counts
    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
                                            return_counts=True)
    t_values = np.linspace(0, 255, 256).astype(np.int)

    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(hist).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)
Example #26
Source File: create_mosaic.py From argus-tgs-salt with MIT License

def hist_match(source, hist):
    """
    Adjust the pixel values of a grayscale image such that its histogram
    matches a given one

    Arguments:
    -----------
        source: np.ndarray
            Image to transform; the histogram is computed over the flattened array
        hist: np.ndarray
            Template histogram

    Returns:
    -----------
        matched: np.ndarray
            The transformed output image
    """
    oldshape = source.shape
    source = source.ravel()

    # get the set of unique pixel values and their corresponding indices and
    # counts
    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
                                            return_counts=True)
    t_values = np.linspace(0, 255, 256).astype(np.int)

    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(hist).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)
Example #27
Source File: colours.py From LSDMappingTools with MIT License

def __call__(self, value, clip=None):
    # I'm ignoring masked values and all kinds of edge cases to make a
    # simple example...
    x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
    return _np.ma.masked_array(_np.interp(value, x, y))
Example #28
Source File: colours.py From LSDMappingTools with MIT License

def __call__(self, xi, alpha=1.0, **kw):
    yi = _np.interp(xi, self._x, self.transformed_levels)
    return self.cmap(yi / (self.levmax-self.levmin) + 0.5, alpha)

#    def __call__(self, xi, alpha=1.0, **kw):
#        yi = _np.interp(xi, self._x, self.transformed_levels)
#        return self.cmap(yi / self.levmax, alpha)
Example #29
Source File: cmath.py From ehtplot with GNU General Public License v3.0

def uniformize(Jpapbp, JpL=None, JpR=None, Jplower=None, Jpupper=None):
    """Make a sequential colormap uniform in lightness J'"""
    if JpL is None:
        JpL = Jpapbp[0, 0]
    if JpR is None:
        JpR = Jpapbp[-1, 0]
    if Jplower is not None:
        JpL, JpR = max(JpL, Jplower), max(JpR, Jplower)
    if Jpupper is not None:
        JpL, JpR = min(JpL, Jpupper), min(JpR, Jpupper)

    out = Jpapbp.copy()
    out[:, 0] = np.linspace(JpL, JpR, out.shape[0])
    out[:, 1] = interp(out[:, 0], Jpapbp[:, 0], Jpapbp[:, 1])
    out[:, 2] = interp(out[:, 0], Jpapbp[:, 0], Jpapbp[:, 2])
    return out
Example #30
Source File: cameraConfig.py From crappy with GNU General Public License v2.0

def run(self):
    """Expects a tuple of 3 args through the pipe:

    - out_size: Tuple, the dimensions of the output histogram image
    - hist_range: Tuple, the lower and upper value of the histogram
      (eg: (0,256) for full scale uint8)
    - img: A numpy array with the image; if not single channel,
      it will be converted to a single channel
    """
    while True:
        out_size, hist_range, img = self.pipe.recv()
        if not isinstance(out_size, tuple):
            break
        hist_range = hist_range[0], hist_range[1]+1
        # np.histogram removes 1 value to the output array, no idea why...
        if len(img.shape) == 3:
            img = np.mean(img, axis=2)
        assert len(img.shape) == 2, "Invalid image: shape= "+str(img.shape)
        # The actual histogram
        h = np.histogram(img, bins=np.arange(*hist_range))[0]
        x = np.arange(out_size[1])  # The base of the image
        # We need to interpolate the histogram on the size of the output image
        l = hist_range[1]-hist_range[0]-1
        fx = np.arange(0, out_size[1], out_size[1]/l, dtype=np.float)
        #fx *= out_size[1]/len(fx)
        h2 = np.interp(x, fx, h)
        h2 = np.nan_to_num(h2*out_size[0]/h2.max())
        out_img = np.zeros(out_size)
        for i in range(out_size[1]):
            out_img[0:int(out_size[0]-h2[i]), i] = 255
        self.pipe.send(out_img)