Python sklearn.metrics.mean_absolute_error() Examples
The following are 28 code examples of sklearn.metrics.mean_absolute_error(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module sklearn.metrics, or try the search function.
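For orientation, here is a minimal, self-contained sketch of the function itself (toy values chosen purely for illustration, not taken from the projects below):

from sklearn.metrics import mean_absolute_error

y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]

# MAE is the mean of the absolute differences:
# (0.5 + 0.5 + 0.0 + 1.0) / 4 = 0.5
print(mean_absolute_error(y_true, y_pred))  # prints 0.5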
Example #1
Source File: BDA.py From transferlearning with MIT License
def proxy_a_distance(source_X, target_X):
    """Compute the Proxy-A-Distance of a source/target representation"""
    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    train_X = np.vstack((source_X, target_X))
    train_Y = np.hstack((np.zeros(nb_source, dtype=int),
                         np.ones(nb_target, dtype=int)))

    clf = svm.LinearSVC(random_state=0)
    clf.fit(train_X, train_Y)
    y_pred = clf.predict(train_X)
    error = metrics.mean_absolute_error(train_Y, y_pred)
    dist = 2 * (1 - 2 * error)
    return dist
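Since train_Y and y_pred are both binary labels here, the mean absolute error is exactly the domain classifier's error rate e, so the statistic reduces to dist = 2 * (1 - 2 * e): a classifier that cannot tell the domains apart (e = 0.5) yields dist = 0, while a perfect separator (e = 0) yields the maximum distance of 2.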
Example #2
Source File: test_regression.py From Mastering-Elasticsearch-7.0 with MIT License
def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])

    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)

    # Handling msle separately as it does not accept negative inputs.
    y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
    y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
    msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
                               multioutput=[0.3, 0.7])
    assert_almost_equal(msle, msle2, decimal=2)
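Besides an explicit weight list as used above, the multioutput argument of these metrics also accepts the strings 'raw_values' (one error per output column) and 'uniform_average' (the default). A minimal sketch with made-up arrays, just to illustrate the three modes:

import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = np.array([[1.0, 2.0], [3.0, 4.0]])
y_pred = np.array([[1.5, 2.0], [2.0, 5.0]])

# One MAE per output column.
print(mean_absolute_error(y_true, y_pred, multioutput='raw_values'))
# Unweighted average across columns (the default behaviour).
print(mean_absolute_error(y_true, y_pred, multioutput='uniform_average'))
# Convex combination of the per-column errors, as in the test above.
print(mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]))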
Example #3
Source File: score_dataset.py From snape with Apache License 2.0
def score_regression(y, y_hat, report=True):
    """
    Create regression score
    :param y: ground-truth target values
    :param y_hat: predicted values
    :param report: if True, print the score report
    :return: the mean absolute error and the report string
    """
    r2 = r2_score(y, y_hat)
    rmse = sqrt(mean_squared_error(y, y_hat))
    mae = mean_absolute_error(y, y_hat)

    report_string = "---Regression Score--- \n"
    report_string += "R2 = " + str(r2) + "\n"
    report_string += "RMSE = " + str(rmse) + "\n"
    report_string += "MAE = " + str(mae) + "\n"

    if report:
        print(report_string)

    return mae, report_string
Example #4
Source File: model_eval.py From healthcareai-py with MIT License
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (ground-truth targets)

    Returns:
        dict: A dictionary of metrics objects
    """
    # Get predictions
    predictions = trained_sklearn_estimator.predict(x_test)

    # Calculate individual metrics
    mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
    mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)

    result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}

    return result
Example #5
Source File: test.py From TrafficFlowPrediction with MIT License
def eva_regress(y_true, y_pred):
    """Evaluate the predicted result.

    # Arguments
        y_true: List/ndarray, true data.
        y_pred: List/ndarray, predicted data.
    """
    mape = MAPE(y_true, y_pred)
    vs = metrics.explained_variance_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    r2 = metrics.r2_score(y_true, y_pred)
    print('explained_variance_score:%f' % vs)
    print('mape:%f%%' % mape)
    print('mae:%f' % mae)
    print('mse:%f' % mse)
    print('rmse:%f' % math.sqrt(mse))
    print('r2:%f' % r2)
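The MAPE helper called on the first line is defined elsewhere in the project and is not shown here. A minimal sketch of what such a helper might look like, consistent with the percentage printout (the implementation below is an assumption, not the project's actual code):

import numpy as np

def MAPE(y_true, y_pred):
    # Hypothetical mean absolute percentage error; assumes y_true has no zeros.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100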
Example #6
Source File: generic.py From datastories-semeval2017-task4 with MIT License
def macro_mae(y_test, y_pred, classes):
    cat_to_class_mapping = {v: int(k) for k, v in
                            get_labels_to_categories_map(classes).items()}
    _y_test = [cat_to_class_mapping[y] for y in y_test]
    _y_pred = [cat_to_class_mapping[y] for y in y_pred]

    c = Counter(_y_pred)
    print(c)

    classes = set(_y_test)
    micro_m = {}
    for c in classes:
        class_sentences = [(t, p) for t, p in zip(_y_test, _y_pred) if t == c]
        yt = [y[0] for y in class_sentences]
        yp = [y[1] for y in class_sentences]
        micro_m[c] = mean_absolute_error(yt, yp)
    # pprint.pprint(sorted(micro_m.items(), key=lambda x: x[1], reverse=True))
    return numpy.mean(list(micro_m.values()))
Example #7
Source File: feature_extractor.py From plastering with MIT License
def paa(data, period=15):
    numCoeff = int(len(data)/period)
    data = data[:numCoeff*period]
    data = data[:int(len(data)/numCoeff)*numCoeff]
    origData = deepcopy(data)
    N = len(data)
    segLen = int(N/numCoeff)
    sN = np.reshape(data, (numCoeff, segLen))
    g = lambda data: np.mean(data)
    # avg = np.mean(sN)
    avg = map(g, sN)  # Python 2 map() returns a list; on Python 3 wrap this in list()
    data = np.matlib.repmat(avg, segLen, 1)
    data = data.ravel(order='F')
    # plt.plot(data)
    # plt.plot(origData)
    # plt.show()
    # rmse = sqrt(mean_squared_error(data, origData))
    mae = sqrt(mean_absolute_error(data, origData))
    # mae = np.var(origData-data)
    return mae
Example #8
Source File: feature_extractor.py From plastering with MIT License
def pla(data, period=15):
    N = int(len(data)/period)
    orig_x = range(0, len(data))
    tck = splrep(orig_x, data, s=0)
    test_xs = np.linspace(0, len(data), N)
    spline_ys = splev(test_xs, tck)
    spline_yps = splev(test_xs, tck, der=1)
    xi = np.unique(tck[0])
    yi = [[splev(x, tck, der=j) for j in xrange(3)] for x in xi]  # xrange: Python 2 only
    # interpolate.PiecewisePolynomial was removed from modern SciPy;
    # BPoly.from_derivatives is the usual replacement.
    P = interpolate.PiecewisePolynomial(xi, yi, orders=1)
    test_ys = P(test_xs)
    # inter_y = interp0(test_xs, test_ys, orig_x)
    inter_y = interp1(test_xs, test_ys, orig_x)  # interp1: helper presumably defined elsewhere in this file
    mae = sqrt(mean_absolute_error(inter_y, data))
    # mae = np.var(inter_y-data)
    return mae

# def paa(data, period=15):
Example #9
Source File: test_genetic.py From gplearn with BSD 3-Clause "New" or "Revised" License
def test_parsimony_coefficient():
    """Check that parsimony coefficients work and that results differ"""
    est1 = SymbolicRegressor(population_size=100, generations=2,
                             parsimony_coefficient=0.001, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             parsimony_coefficient='auto', random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01)
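Two caveats for anyone rerunning these gplearn tests (this one and the two that follow): the Boston housing dataset they rely on was deprecated in scikit-learn 1.0 and removed in 1.2, so a modern installation needs a substitute such as fetch_california_housing; and mean_absolute_error is called with the (predictions, targets) order reversed relative to the usual (y_true, y_pred) convention, which is harmless only because MAE is symmetric in its two arguments.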
Example #10
Source File: test_genetic.py From gplearn with BSD 3-Clause "New" or "Revised" License
def test_subsample():
    """Check that subsample works and that results differ"""
    est1 = SymbolicRegressor(population_size=100, generations=2,
                             max_samples=1.0, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             max_samples=0.5, random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01)
Example #11
Source File: test_genetic.py From gplearn with BSD 3-Clause "New" or "Revised" License
def test_trigonometric():
    """Check that using trig functions works and that results differ"""
    est1 = SymbolicRegressor(population_size=100, generations=2,
                             random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             function_set=['add', 'sub', 'mul', 'div',
                                           'sin', 'cos', 'tan'],
                             random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01)
Example #12
Source File: test_builder.py From gordo with GNU Affero General Public License v3.0
def test_metrics_from_list():
    """
    Check getting functions from a list of metric names
    """
    default = ModelBuilder.metrics_from_list()
    assert default == [
        metrics.explained_variance_score,
        metrics.r2_score,
        metrics.mean_squared_error,
        metrics.mean_absolute_error,
    ]

    specifics = ModelBuilder.metrics_from_list(
        ["sklearn.metrics.adjusted_mutual_info_score", "sklearn.metrics.r2_score"]
    )
    assert specifics == [metrics.adjusted_mutual_info_score, metrics.r2_score]
Example #13
Source File: test_run.py From nyaggle with MIT License
def test_experiment_cat_custom_eval(tmpdir_name):
    X, y = make_regression_df(n_samples=1024, n_num_features=10, n_cat_features=2,
                              random_state=0, id_column='user_id')
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    params = {
        'max_depth': 8,
        'num_boost_round': 100,
        'eval_metric': 'MAE'
    }

    result = run_experiment(params, X_train, y_train, X_test, tmpdir_name,
                            algorithm_type='cat', eval_func=mean_absolute_error)

    assert mean_absolute_error(y_train, result.oof_prediction) == result.metrics[-1]
    _check_file_exists(tmpdir_name)
Example #14
Source File: test_regression.py From Mastering-Elasticsearch-7.0 with MIT License
def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = mean_squared_log_error(y_true, y_pred)
    assert_almost_equal(error, 0.200, decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)
Example #15
Source File: test_regression.py From Mastering-Elasticsearch-7.0 with MIT License
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(max_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [-1.], [-1.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., 2., 3.], [1., -2., 3.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., -2., 3.], [1., 2., 3.])
Example #16
Source File: predictionclasses.py From predictatops with MIT License
def mean_absolute_error_func(self):
    """
    THINGS GO HERE
    """
    # self.TopTarget_DEPTH
    print(type(self.calc_pred_TopTarget_DEPTH))
    print(type(self.calc_pred_TopTarget_DEPTH[self.TopTarget_DEPTH]))
    print(type(self.calc_pred_Top_Pick_pred_DEPT_pred))
    print(
        type(
            self.calc_pred_Top_Pick_pred_DEPT_pred[
                self.TopTarget_Pick_pred_DEPT_pred
            ]
        )
    )
    print(type(self.TopTarget_DEPTH))
    print(type(self.TopTarget_Pick_pred_DEPT_pred))
    mean_absolute_error_ = mean_absolute_error(
        self.calc_pred_TopTarget_DEPTH[self.TopTarget_DEPTH],
        self.calc_pred_Top_Pick_pred_DEPT_pred[self.TopTarget_Pick_pred_DEPT_pred],
    )
    # mean_absolute_error_ = mean_absolute_error(self.calc_pred_TopTarget_DEPTH[self.TopTarget_DEPTH], self.calc_pred_Top_Pick_pred_DEPT_pred[self.TopTarget_Pick_pred_DEPT_pred])
    # mean_absolute_error_ = mean_absolute_error(self.calc_pred_TopTarget_DEPTH['TopTarget_DEPTH'], self.calc_pred_Top_Pick_pred_DEPT_pred['TopTarget_Pick_pred_DEPT_pred'])
    return mean_absolute_error_
Example #17
Source File: evaluator.py From RegRCNN with Apache License 2.0
def MAE(y_true, y_pred, weights=None):
    if len(y_true) > 0:
        return mean_absolute_error(y_true, y_pred, sample_weight=weights)
    else:
        return np.nan
Example #18
Source File: evaluation.py From deepQuest with BSD 3-Clause "New" or "Revised" License
def eval_sent_qe(gt_list, pred_list, qe_type):
    from sklearn.metrics import mean_absolute_error, mean_squared_error
    import numpy as np

    pred_fin = []
    for pred_batch in pred_list:
        pred_fin.extend(pred_batch.flatten())

    logging.info('**Predicted scores**')
    logging.info(pred_fin)

    if len(gt_list) > 0:
        mse = mean_squared_error(gt_list, pred_fin)
        rmse = np.sqrt(mse)
        mae = mean_absolute_error(gt_list, pred_fin)
        pear_corr = np.corrcoef(gt_list, pred_fin)[0, 1]

        logging.info('**' + qe_type + 'QE**')
        logging.info('Pearson %.4f' % pear_corr)
        logging.info('MAE %.4f' % mae)
        logging.info('RMSE %.4f' % rmse)

        return {'pearson': pear_corr, 'mae': mae, 'rmse': rmse, 'pred': pred_fin}
    else:
        return {'pred': pred_fin}
Example #19
Source File: rmse_calculator.py From yelp with GNU Lesser General Public License v2.1
def calculate_mae(true_values, predictions):
    return mean_absolute_error(true_values, predictions)
Example #20
Source File: train.py From mlflow-example with Apache License 2.0
def eval_metrics(actual, pred):
    rmse = np.sqrt(mean_squared_error(actual, pred))
    mae = mean_absolute_error(actual, pred)
    r2 = r2_score(actual, pred)
    return rmse, mae, r2
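A side note on the RMSE line: scikit-learn versions that support the squared keyword of mean_squared_error (introduced in 0.22; newer releases expose root_mean_squared_error instead) can compute it without the explicit np.sqrt. A sketch under that version assumption:

from sklearn.metrics import mean_squared_error

actual = [3.0, -0.5, 2.0, 7.0]  # illustrative values, not from the project
pred = [2.5, 0.0, 2.0, 8.0]

# Equivalent to np.sqrt(mean_squared_error(actual, pred)).
rmse = mean_squared_error(actual, pred, squared=False)
print(rmse)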
Example #21
Source File: evaluator.py From RegRCNN with Apache License 2.0
def MAE_w_std(y_true, y_pred, weights=None):
    if len(y_true) > 0:
        y_true, y_pred = np.array(y_true), np.array(y_pred)
        deltas = np.abs(y_true - y_pred)
        mae = np.average(deltas, weights=weights, axis=0).item()
        skmae = mean_absolute_error(y_true, y_pred, sample_weight=weights)
        assert np.allclose(mae, skmae, atol=1e-6), "mae {}, sklearn mae {}".format(mae, skmae)
        # note: this line assumes weights is not None; with the default
        # weights=None the multiplication raises a TypeError
        std = np.std(weights * deltas)
        return mae, std
    else:
        return np.nan, np.nan
Example #22
Source File: MEDA.py From transferlearning with MIT License
def proxy_a_distance(source_X, target_X):
    """Compute the Proxy-A-Distance of a source/target representation"""
    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    train_X = np.vstack((source_X, target_X))
    train_Y = np.hstack((np.zeros(nb_source, dtype=int),
                         np.ones(nb_target, dtype=int)))

    clf = svm.LinearSVC(random_state=0)
    clf.fit(train_X, train_Y)
    y_pred = clf.predict(train_X)
    error = metrics.mean_absolute_error(train_Y, y_pred)
    dist = 2 * (1 - 2 * error)
    return dist
Example #23
Source File: nn_trainer_evaluate.py From CityEnergyAnalyst with MIT License
def get_nn_performance(model, scalerT, scalerX, urban_input_matrix, urban_taget_matrix, locator):
    input_NN_x = urban_input_matrix
    target_NN_t = urban_taget_matrix
    inputs_x = scalerX.transform(input_NN_x)
    model_estimates = model.predict(inputs_x)
    filtered_predict = scalerT.inverse_transform(model_estimates)

    rmse_Qhsf = sqrt(mean_squared_error(target_NN_t[:, 0], filtered_predict[:, 0]))
    rmse_Qcsf = sqrt(mean_squared_error(target_NN_t[:, 1], filtered_predict[:, 1]))
    rmse_Qwwf = sqrt(mean_squared_error(target_NN_t[:, 2], filtered_predict[:, 2]))
    rmse_Ef = sqrt(mean_squared_error(target_NN_t[:, 3], filtered_predict[:, 3]))
    rmse_T_int = sqrt(mean_squared_error(target_NN_t[:, 4], filtered_predict[:, 4]))

    # note: despite the "mbe" naming, these values are mean absolute errors
    mbe_Qhsf = mean_absolute_error(target_NN_t[:, 0], filtered_predict[:, 0])
    mbe_Qcsf = mean_absolute_error(target_NN_t[:, 1], filtered_predict[:, 1])
    mbe_Qwwf = mean_absolute_error(target_NN_t[:, 2], filtered_predict[:, 2])
    mbe_Ef = mean_absolute_error(target_NN_t[:, 3], filtered_predict[:, 3])
    mbe_T_int = mean_absolute_error(target_NN_t[:, 4], filtered_predict[:, 4])

    print("the rmse of Qhsf is %d and the mbe is %d" % (rmse_Qhsf, mbe_Qhsf))
    print(rmse_Qcsf, mbe_Qcsf)
    print(rmse_Qwwf, mbe_Qwwf)
    print(rmse_Ef, mbe_Ef)
    print(rmse_T_int, mbe_T_int)

    model_estimates = locator.get_neural_network_estimates()
    filtered_predict = pd.DataFrame(filtered_predict)
    filtered_predict.to_csv(model_estimates, index=False, header=False,
                            float_format='%.3f', decimal='.')

    return urban_input_matrix, urban_taget_matrix
Example #24
Source File: regression_metric.py From FATE with Apache License 2.0
def compute(labels, pred_scores):
    return mean_absolute_error(labels, pred_scores)
Example #25
Source File: regression_loss_test.py From FATE with Apache License 2.0
def test_compute_loss(self):
    sklearn_loss = metrics.mean_absolute_error(self.y_list, self.predict_list)
    lae_loss = self.lae_loss.compute_loss(self.y, self.predict)
    self.assertTrue(np.fabs(lae_loss - sklearn_loss) < consts.FLOAT_ZERO)
Example #26
Source File: engine_output_creation.py From timecop with Apache License 2.0
def metrics_generation(self, list_test, list_yhat):
    # list_test = df_test['valores'].values
    mse = mean_squared_error(list_test, list_yhat)
    rmse = np.sqrt(mse)
    self.engine_output['rmse'] = rmse
    self.engine_output['mse'] = mse
    self.engine_output['mae'] = mean_absolute_error(list_test, list_yhat)
    self.engine_output['smape'] = smape(list_test, list_yhat)
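The smape helper on the last line is not part of this snippet. A minimal sketch of a symmetric mean absolute percentage error, offered only as an illustration (the project's actual definition may differ):

import numpy as np

def smape(y_true, y_pred):
    # Hypothetical symmetric MAPE returning a percentage in [0, 200];
    # assumes y_true and y_pred are never both zero at the same index.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    denom = (np.abs(y_true) + np.abs(y_pred)) / 2.0
    return np.mean(np.abs(y_true - y_pred) / denom) * 100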
Example #27
Source File: evaluation.py From cddd with MIT License
def qsar_regression(emb, groups, labels):
    """Helper function that fits and scores a SVM regressor on the extracted
    molecular descriptor in a leave-one-group-out cross-validation manner.

    Args:
        emb: Embedding (molecular descriptor) that is used as input for the SVM.
        groups: Array or list with n_samples entries defining the fold
            membership for the cross-validation.
        labels: Target values of the qsar task.
    Returns:
        The mean R2 score, Spearman correlation, MSE and MAE of the
        cross-validation.
    """
    r2 = []
    r = []
    mse = []
    mae = []
    logo = LeaveOneGroupOut()
    clf = SVR(kernel='rbf', C=5.0)
    for train_index, test_index in logo.split(emb, groups=groups):
        clf.fit(emb[train_index], labels[train_index])
        y_pred = clf.predict(emb[test_index])
        y_true = labels[test_index]
        r2.append(r2_score(y_true, y_pred))
        r.append(spearmanr(y_true, y_pred)[0])
        mse.append(mean_squared_error(y_true, y_pred))
        mae.append(mean_absolute_error(y_true, y_pred))
    return np.mean(r2), np.mean(r), np.mean(mse), np.mean(mae)
Example #28
Source File: helpers.py From timecop with Apache License 2.0
def predict_and_score(model, X, Y, scaler):
    # Make predictions on the original scale of the data.
    pred_scaled = model.predict(X)
    pred = scaler.inverse_transform(pred_scaled)
    # Prepare Y data to also be on the original scale for interpretability.
    orig_data = scaler.inverse_transform([Y])
    # Calculate MSE and MAE (the "score" returned here is the MSE, not RMSE).
    score = mean_squared_error(orig_data[0], pred[:, 0])
    mae = mean_absolute_error(orig_data[0], pred[:, 0])
    return (score, pred, pred_scaled, mae)