Python sklearn.preprocessing.minmax_scale() Examples
The following are 11 code examples of sklearn.preprocessing.minmax_scale(), drawn from open-source projects. You can go to the original project or source file by following the attribution above each example. You may also want to check out all available functions/classes of the module sklearn.preprocessing, or try the search function.
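As a quick orientation before the project examples, here is a minimal sketch of what minmax_scale() itself does; the input values are invented for illustration:

import numpy as np
from sklearn.preprocessing import minmax_scale

X = np.array([[1.0, -1.0],
              [2.0,  0.0],
              [4.0,  2.0]])

# scale each column independently to the default range (0, 1)
print(minmax_scale(X))

# scale each row instead, to a custom range
print(minmax_scale(X, feature_range=(-1, 1), axis=1))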
Example #1
Source File: preprocess.py From deepJDOT with MIT License
import numpy as np


def min_max_scaling(data, lowerbound_zero=False):
    from sklearn.preprocessing import minmax_scale
    size = data.shape
    data = data / 255.0
    if not lowerbound_zero:
        # map from (0, 1) to (-1, 1)
        data = (data * 2.0) - 1.0
    data[np.isnan(data)] = 0
    # if len(size) == 4:
    #     for i in range(size[3]):
    #         tmp = minmax_scale(data[:, :, :, i].reshape(-1, size[1] * size[2]),
    #                            feature_range=(s, t), axis=1)
    #         data[:, :, :, i] = tmp.reshape(-1, size[1], size[2])
    # elif len(size) == 3:
    #     data = minmax_scale(data.reshape(-1, size[1] * size[2]), axis=1)
    #     data = data.reshape(-1, size[1], size[2])
    return data
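A quick, hedged way to exercise min_max_scaling(); the batch shape and the uint8 pixel range are invented for illustration:

import numpy as np

batch = np.random.randint(0, 256, size=(8, 28, 28, 1)).astype(np.float64)
scaled = min_max_scaling(batch)                        # values in (-1, 1)
zeroed = min_max_scaling(batch, lowerbound_zero=True)  # values in (0, 1)
print(scaled.min(), scaled.max())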
Example #2
Source File: plot_tutorial2.py From BrainSpace with BSD 3-Clause "New" or "Revised" License
import numpy as np


def fusion(*args):
    from scipy.stats import rankdata
    from sklearn.preprocessing import minmax_scale

    max_rk = [None] * len(args)
    masks = [None] * len(args)
    for j, a in enumerate(args):
        # rank the non-zero entries of each matrix
        m = masks[j] = a != 0
        a[m] = rankdata(a[m])
        max_rk[j] = a[m].max()

    # use the smallest maximum rank as the common upper bound
    max_rk = min(max_rk)
    for j, a in enumerate(args):
        m = masks[j]
        a[m] = minmax_scale(a[m], feature_range=(1, max_rk))

    return np.hstack(args)  # fuse the matrices
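A toy call to fusion(), with two small float matrices invented for illustration; note that the function writes the ranks back into its arguments, so the inputs are modified in place:

import numpy as np

a = np.array([[0.0, 0.5], [2.0, 0.0]])
b = np.array([[1.0, 0.0], [0.0, 3.0]])
fused = fusion(a, b)  # rank, rescale, then stack horizontally
print(fused.shape)    # (2, 4)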
Example #3
Source File: coKriging.py From gempy with GNU Lesser General Public License v3.0
def preprocess(self):
    """Normalize the data to the range (0, 1) and subtract the nuggets.

    Returns:
        pandas.core.frame.DataFrame: Dataframe containing the transformed data
        pandas.core.frame.DataFrame: Dataframe containing the subtracted nuggets
    """
    import sklearn.preprocessing as skp

    # Normalization (pn is the pandas alias used in this project)
    scaled_data = pn.DataFrame(skp.minmax_scale(self.exp_var_raw[self.properties]),
                               columns=self.properties)
    # Nuggets: the first row of the scaled data
    nuggets = scaled_data[self.properties].iloc[0]
    processed_data = scaled_data - nuggets
    return processed_data, nuggets
Example #4
Source File: model_v40_BAK.py From Quora with MIT License
import numpy as np
from sklearn.preprocessing import minmax_scale


def features_transformer(df_text):
    from nlp import meta_features_transformer
    from nlp import topic_features_transformer

    # get features
    meta_features = meta_features_transformer(df_text).values
    topic_features = topic_features_transformer(df_text).values

    # concatenate, then scale every column to (0, 1)
    joined_features = np.hstack([meta_features, topic_features])
    return minmax_scale(joined_features)
Example #5
Source File: RECI.py From CausalDiscoveryToolbox with MIT License
def b_fit_score(self, x, y):
    """Compute the RECI fit score.

    Args:
        x (numpy.ndarray): Variable 1
        y (numpy.ndarray): Variable 2

    Returns:
        float: RECI fit score
    """
    # np, minmax_scale, PolynomialFeatures, LinearRegression and
    # mean_squared_error are module-level imports in RECI.py
    x = np.reshape(minmax_scale(x), (-1, 1))
    y = np.reshape(minmax_scale(y), (-1, 1))
    poly = PolynomialFeatures(degree=self.degree)
    poly_x = poly.fit_transform(x)

    # zero out the x and x**2 columns so only the constant
    # and higher-order terms enter the regression
    poly_x[:, 1] = 0
    poly_x[:, 2] = 0

    regressor = LinearRegression()
    regressor.fit(poly_x, y)

    y_predict = regressor.predict(poly_x)
    error = mean_squared_error(y_predict, y)
    return error
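To see the idea outside the class, here is a hedged, standalone sketch that compares the fit error in both directions on synthetic data; the data generation and degree are invented, and the column-zeroing step above is omitted for simplicity:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures, minmax_scale
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error


def fit_error(x, y, degree=3):
    # a lower residual error suggests this causal direction fits better
    x = minmax_scale(x).reshape(-1, 1)
    y = minmax_scale(y).reshape(-1, 1)
    poly_x = PolynomialFeatures(degree=degree).fit_transform(x)
    reg = LinearRegression().fit(poly_x, y)
    return mean_squared_error(reg.predict(poly_x), y)


rng = np.random.RandomState(0)
x = rng.uniform(-1, 1, 500)
y = x ** 2 + 0.05 * rng.randn(500)
print(fit_error(x, y), fit_error(y, x))  # expect the first error to be smaller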
Example #6
Source File: holoscopeFraudDect.py From HoloScope with Apache License 2.0
def weightWithDropslop(self, weighted, scale):
    """Weight the adjacency matrix with the sudden drop of ts for each col."""
    if weighted:
        colWeights = np.multiply(self.tspim.dropslops, self.tspim.dropfalls)
    else:
        colWeights = self.tspim.dropslops
    if scale == 'logistic':
        from scipy.stats import logistic
        from sklearn import preprocessing
        # zero-mean scale, then squash through the logistic CDF
        colWeights = preprocessing.scale(colWeights)
        colWeights = logistic.cdf(colWeights)
    elif scale == 'linear':
        from sklearn import preprocessing
        # add a base level of suspicion for each edge
        colWeights = preprocessing.minmax_scale(colWeights) + 1
    elif scale == 'plusone':
        colWeights += 1
    elif scale == 'log1p':
        colWeights = np.log1p(colWeights) + 1
    else:
        print('[Warning] no scale for the prior weight')
    n = self.nV
    colDiag = lil_matrix((n, n))
    colDiag.setdiag(colWeights)
    self.graphr = self.graphr * colDiag.tocsr()
    self.graph = self.graphr.tocoo(copy=False)
    self.graphc = self.graph.tocsc(copy=False)
    print('finished computing weight matrix')
Example #7
Source File: holoscopeFraudDect.py From HoloScope with Apache License 2.0
def evalsusp4rate(self, suspusers, neutral=False, scale='max'):
    susprates = self.ratepim.suspratedivergence(neutral, delta=True)
    if scale == 'max':
        assert self.ratepim.maxratediv > 0
        nsusprates = susprates / self.ratepim.maxratediv
    elif scale == 'minmax':
        # need a copy: do not change susprates' values, which are reused for delta
        nsusprates = preprocessing.minmax_scale(susprates, copy=True)
    else:
        # no scaling
        nsusprates = susprates
    return nsusprates
Example #8
Source File: tools.py From pylinac with MIT License
def process_image(path):
    """Load and resize the image and return it as a flattened numpy array."""
    img = image.load(path, dtype=np.float32)
    resized_img = imresize(img.array, size=(100, 100), mode='F').flatten()
    rescaled_img = preprocessing.minmax_scale(resized_img)
    return rescaled_img
Example #9
Source File: heatmap.py From XenonPy with BSD 3-Clause "New" or "Revised" License
def _transform(self, series):
    series_ = series
    if series.min() != series.max():
        if self.bc:
            with np.errstate(all='raise'):
                # shift so every value is strictly positive, as Box-Cox requires
                shift = 1e-10
                tmp = series - series.min() + shift
                try:
                    series_, _ = boxcox(tmp)
                except FloatingPointError:
                    series_ = series
        series_ = minmax_scale(series_)
    return series_
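A standalone sketch of the same Box-Cox-then-minmax idea on a synthetic right-skewed series; the data are invented for illustration:

import numpy as np
from scipy.stats import boxcox
from sklearn.preprocessing import minmax_scale

rng = np.random.RandomState(42)
series = rng.lognormal(mean=0.0, sigma=1.0, size=1000)  # right-skewed values

shifted = series - series.min() + 1e-10  # Box-Cox needs strictly positive input
transformed, lmbda = boxcox(shifted)
scaled = minmax_scale(transformed)
print(scaled.min(), scaled.max())  # 0.0 1.0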
Example #10
Source File: img_proc.py From ml_code with Apache License 2.0
import numpy as np
from PIL import Image
from sklearn import preprocessing


def getImgAsMatFromFile(filename, width=28, height=28, scale_min=0, scale_max=1):
    # img = io.imread(filename, as_grey=True)
    img = Image.open(filename)
    img = img.resize((width, height), Image.BILINEAR)
    imgArr_2d = np.array(img.convert('L'))
    # invert the grayscale in float space so dark strokes become high values
    # (the original's np.float64(1 - imgArr_2d) wraps around on uint8 input)
    imgArr_2d = 1.0 - imgArr_2d / 255.0
    shape_2d = imgArr_2d.shape
    # use the scale_min/scale_max parameters, which the original left unused
    imgArr_1d_scale = preprocessing.minmax_scale(imgArr_2d.flatten(),
                                                 feature_range=(scale_min, scale_max))
    return imgArr_1d_scale.reshape(shape_2d)
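A hedged usage sketch; the file name is hypothetical, and the image is generated on the fly purely so the call is runnable:

import numpy as np
from PIL import Image

noise = (np.random.rand(64, 64) * 255).astype('uint8')
Image.fromarray(noise).save('digit.png')  # hypothetical file name

mat = getImgAsMatFromFile('digit.png', width=28, height=28)
print(mat.shape, mat.min(), mat.max())  # (28, 28) 0.0 1.0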
Example #11
Source File: vectorSpatialAnalysis.py From python-urbanPlanning with MIT License
def G_display(G):
    # make a new graph
    H = nx.Graph()
    for v in G:
        H.add_node(v)

    weightValue = list(nx.get_edge_attributes(G, 'weight').values())  # extract the edge weights
    # weightsForWidth = [G[u][v]['weight'] for u, v in G.edges()]  # another way

    import pysal.viz.mapclassify as mc
    q = mc.Quantiles(weightValue, k=30).bins  # quantile bins used to pick which edges to display

    # keep only the edges in the top weight quantiles
    for (u, v, d) in tqdm(G.edges(data=True)):
        if d['weight'] > q[28]:
            H.add_edge(u, v)
    print("H_digraph has %d nodes with %d edges" % (nx.number_of_nodes(H), nx.number_of_edges(H)))

    # draw with matplotlib/pylab
    plt.figure(figsize=(18, 18))

    # color the nodes by degree, size them by shape area
    node_color = [float(H.degree(v)) for v in H]
    # set the edge widths from the weights
    weightsForWidthScale = np.interp(weightValue, (min(weightValue), max(weightValue)), (1, 3000))
    scaleNode = 1
    # sklearn.preprocessing.minmax_scale(X, feature_range=(0, 1), axis=0, copy=True)
    nx.draw(H, G.position,
            node_size=minmax_scale([G.shape_area[v] * scaleNode for v in H], feature_range=(10, 2200)),
            node_color=node_color, with_labels=True,
            edge_cmap=plt.cm.Blues, width=weightsForWidthScale)

    # scale the axes equally
    # plt.xlim(-5000, 500)
    # plt.ylim(-2000, 3500)
    plt.show()

# Convert a CSV file to .shp format and return the key information; implemented with geopandas