Python numpy.logaddexp() Examples
The following are 23 code examples of numpy.logaddexp(), collected from open-source projects. Each example lists its original project and source file. You may also want to check out all available functions/classes of the module numpy.
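For context before the examples: np.logaddexp(x1, x2) computes log(exp(x1) + exp(x2)) elementwise while staying in log space, which avoids the overflow of the naive formulation. A minimal sketch:

import numpy as np

print(np.logaddexp(1000.0, 1000.0))             # 1000.6931..., i.e. 1000 + log(2)
print(np.log(np.exp(1000.0) + np.exp(1000.0)))  # inf: np.exp(1000) overflows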
Example #1
Source File: core.py From multiagent-particle-envs with MIT License

def get_collision_force(self, entity_a, entity_b):
    if (not entity_a.collide) or (not entity_b.collide):
        return [None, None]  # not a collider
    if (entity_a is entity_b):
        return [None, None]  # don't collide against itself
    # compute actual distance between entities
    delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
    dist = np.sqrt(np.sum(np.square(delta_pos)))
    # minimum allowable distance
    dist_min = entity_a.size + entity_b.size
    # softmax penetration
    k = self.contact_margin
    penetration = np.logaddexp(0, -(dist - dist_min) / k) * k
    force = self.contact_force * delta_pos / dist * penetration
    force_a = +force if entity_a.movable else None
    force_b = -force if entity_b.movable else None
    return [force_a, force_b]
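Here np.logaddexp(0, v) is the softplus function log(1 + exp(v)), so penetration is a smooth, always-positive stand-in for max(0, dist_min - dist), scaled by the contact margin k. A minimal sketch with hypothetical values:

import numpy as np

k = 0.05                               # hypothetical contact margin
overlap = -(0.9 - 1.0)                 # dist = 0.9, dist_min = 1.0: 0.1 overlap
penetration = np.logaddexp(0, overlap / k) * k
print(penetration)                     # ~0.106, a smoothed max(0, overlap)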
Example #2
Source File: gradient_boosting.py From Mastering-Elasticsearch-7.0 with MIT License

def __call__(self, y, pred, sample_weight=None):
    """Compute the deviance (= 2 * negative log-likelihood).

    Parameters
    ----------
    y : array, shape (n_samples,)
        True labels

    pred : array, shape (n_samples,)
        Predicted labels

    sample_weight : array-like, shape (n_samples,), optional
        Sample weights.
    """
    # logaddexp(0, v) == log(1.0 + exp(v))
    pred = pred.ravel()
    if sample_weight is None:
        return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
    else:
        return (-2.0 / sample_weight.sum() *
                np.sum(sample_weight *
                       ((y * pred) - np.logaddexp(0.0, pred))))
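For binary labels y in {0, 1} and log-odds pred, the per-sample log-likelihood is y * pred - log(1 + exp(pred)); np.logaddexp(0, pred) evaluates the second term without overflowing for large pred. A minimal sketch:

import numpy as np

pred = np.array([-800.0, 0.0, 800.0])   # extreme log-odds
stable = np.logaddexp(0.0, pred)        # [0.0, 0.693..., 800.0]
naive = np.log(1.0 + np.exp(pred))      # overflows to inf at pred = 800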
Example #3
Source File: test_ufunc.py From vnpy_crypto with MIT License

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod
        ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
Example #4
Source File: _gb_losses.py From Mastering-Elasticsearch-7.0 with MIT License

def __call__(self, y, raw_predictions, sample_weight=None):
    """Compute the deviance (= 2 * negative log-likelihood).

    Parameters
    ----------
    y : 1d array, shape (n_samples,)
        True labels.

    raw_predictions : 2d array, shape (n_samples, K)
        The raw predictions (i.e. values from the tree leaves) of the
        tree ensemble.

    sample_weight : 1d array, shape (n_samples,), optional
        Sample weights.
    """
    # logaddexp(0, v) == log(1.0 + exp(v))
    raw_predictions = raw_predictions.ravel()
    if sample_weight is None:
        return -2 * np.mean((y * raw_predictions) -
                            np.logaddexp(0, raw_predictions))
    else:
        return (-2 / sample_weight.sum() * np.sum(
            sample_weight * ((y * raw_predictions) -
                             np.logaddexp(0, raw_predictions))))
Example #5
Source File: bagging.py From Mastering-Elasticsearch-7.0 with MIT License

def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=np.int)

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)

        else:
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
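Because class probabilities from each estimator are combined in log space, the running total starts at -inf (the log of zero) and each np.logaddexp call folds one estimator's probabilities into the sum. A minimal sketch of that accumulation pattern:

import numpy as np

log_total = np.full(3, -np.inf)   # log(0): an empty accumulator
for log_p in [np.log([0.2, 0.3, 0.5]), np.log([0.4, 0.4, 0.2])]:
    log_total = np.logaddexp(log_total, log_p)
print(np.exp(log_total))          # [0.6, 0.7, 0.7], the summed probabilities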
Example #6
Source File: models.py From pysaliency with MIT License

def sample_from_logprobabilities(log_probabilities, size=1, rst=None):
    """Sample from log probabilities (robust to many bins and small probabilities).

    +-np.inf and np.nan will be interpreted as zero probability
    """
    if rst is None:
        rst = np.random
    log_probabilities = np.asarray(log_probabilities)
    valid_indices = np.nonzero(np.isfinite(log_probabilities))[0]
    valid_log_probabilities = log_probabilities[valid_indices]

    ndxs = valid_log_probabilities.argsort()
    sorted_log_probabilities = valid_log_probabilities[ndxs]
    cumsums = np.logaddexp.accumulate(sorted_log_probabilities)
    cumsums -= cumsums[-1]

    tmps = -rst.exponential(size=size)
    js = np.searchsorted(cumsums, tmps)
    valid_values = ndxs[js]

    values = valid_indices[valid_values]
    return values
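np.logaddexp.accumulate is the log-space analogue of np.cumsum, so cumsums is a log-CDF; subtracting cumsums[-1] normalizes it, and the -rst.exponential(size) draws are distributed like the log of a Uniform(0, 1) sample, making the searchsorted call an inverse-CDF sampler. A minimal sketch:

import numpy as np

log_p = np.log([0.1, 0.2, 0.7])
log_cdf = np.logaddexp.accumulate(log_p)   # log of the cumulative sums
print(np.exp(log_cdf))                     # [0.1, 0.3, 1.0]
u = np.log(np.random.rand())               # distributed like -Exponential(1)
print(np.searchsorted(log_cdf, u))         # 0, 1, or 2 with probs 0.1/0.2/0.7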
Example #7
Source File: test_ufunc.py From predictive-maintenance-using-machine-learning with Apache License 2.0

def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
        np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
        np.logical_and, np.logical_or, np.logical_xor, np.maximum,
        np.minimum, np.mod,
        np.greater, np.greater_equal, np.less, np.less_equal,
        np.equal, np.not_equal]

    a = np.array('1')
    b = 1
    c = np.array([1., 2.])
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
        assert_raises(TypeError, f, c, a)
Example #8
Source File: test_softmax_cross_entropy.py From chainer with MIT License

def expected_forward_without_reduce(self, x_data, t_data, class_weight):
    x = numpy.rollaxis(x_data, 1, x_data.ndim).reshape(
        (t_data.size, x_data.shape[1]))
    t = t_data.ravel()

    loss_shape = x_data.shape[0:1] + x_data.shape[2:]
    loss_expect = numpy.zeros(loss_shape, x_data.dtype)
    for i, (ti, loss_idx) in enumerate(
            zip(t, numpy.ndindex(*loss_shape))):
        xi = x[i]
        if ti == -1:
            continue

        log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
        if class_weight is None:
            loss_expect[loss_idx] = -(xi - log_z)[ti]
        else:
            loss_expect[loss_idx] = -(xi - log_z)[ti] * class_weight[ti]

    return numpy.asarray(loss_expect, dtype=x.dtype)
Example #9
Source File: test_umath.py From GraphicDesignPatternByPython with MIT License

def test_inf(self):
    inf = np.inf
    x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
    y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
    z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
    with np.errstate(invalid='raise'):
        for dt in ['f', 'd', 'g']:
            logxf = np.array(x, dtype=dt)
            logyf = np.array(y, dtype=dt)
            logzf = np.array(z, dtype=dt)
            assert_equal(np.logaddexp(logxf, logyf), logzf)
Example #10
Source File: test_umath.py From pySINDy with MIT License

def test_nan(self):
    assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
    assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
    assert_(np.isnan(np.logaddexp(np.nan, 0)))
    assert_(np.isnan(np.logaddexp(0, np.nan)))
    assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
Example #11
Source File: test_umath.py From pySINDy with MIT License

def test_logaddexp_range(self):
    x = [1000000, -1000000, 1000200, -1000200]
    y = [1000200, -1000200, 1000000, -1000000]
    z = [1000200, -1000000, 1000200, -1000000]
    for dt in ['f', 'd', 'g']:
        logxf = np.array(x, dtype=dt)
        logyf = np.array(y, dtype=dt)
        logzf = np.array(z, dtype=dt)
        assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
Example #12
Source File: models.py From pysaliency with MIT License

def pixel_space_information_gain(self, baseline, gold_standard, stimulus,
                                 eps=1e-20):
    log_p_gold = gold_standard.log_density(stimulus)
    log_p_baseline = baseline.log_density(stimulus)
    log_p_model = self.log_density(stimulus)
    p_gold = np.exp(log_p_gold)
    p_gold[p_gold == 0] = p_gold[p_gold > 0].min()
    ig = p_gold * (np.logaddexp(log_p_model, np.log(eps)) -
                   np.logaddexp(log_p_baseline, np.log(eps)))
    return ig
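Since log_p_model and np.log(eps) are both logarithms, np.logaddexp(log_p_model, np.log(eps)) is log(p_model + eps): an epsilon-regularized log-density that never drops below log(eps) where the model assigns vanishing probability. A minimal sketch:

import numpy as np

eps = 1e-20
log_p = np.array([-200.0, -1.0])                # one vanishing density
regularized = np.logaddexp(log_p, np.log(eps))  # log(p + eps)
print(regularized)                              # [~-46.05, ~-1.0]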
Example #13
Source File: baseline_utils.py From pysaliency with MIT License

def score_samples(self, X):
    kde_logliks = self.kde.score_samples(X[:, :2])
    logliks = np.logaddexp(
        self.kde_constant + kde_logliks,
        self.uniform_constant + self.uniform_density
    )
    return logliks
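The two arguments have the shape of log(weight) + log(density) terms, so the logaddexp computes the log-density of a two-component mixture, log(w_kde * p_kde + w_uniform * p_uniform), entirely in log space. A minimal sketch with hypothetical values, assuming kde_constant and uniform_constant are log mixture weights:

import numpy as np

log_w_kde, log_w_uniform = np.log(0.9), np.log(0.1)  # hypothetical weights
log_p_kde = np.array([-2.3, -0.7])                   # hypothetical KDE log-densities
log_p_uniform = np.log(1.0 / 100.0)                  # uniform over a 100-unit area
log_mix = np.logaddexp(log_w_kde + log_p_kde, log_w_uniform + log_p_uniform)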
Example #14
Source File: baseline_utils.py From pysaliency with MIT License

def score_samples(self, X):
    assert X.shape[1] == 3
    kde_logliks = self.kde.score_samples(X[:, :2])
    fix_ns = X[:, 2].astype(int)
    fix_lls = self.regularizing_log_likelihoods[fix_ns]
    logliks = np.logaddexp(
        self.kde_constant + kde_logliks,
        self.uniform_constant + fix_lls
    )
    return logliks
Example #15
Source File: test_umath.py From pySINDy with MIT License

def test_logaddexp_values(self):
    x = [1, 2, 3, 4, 5]
    y = [5, 4, 3, 2, 1]
    z = [6, 6, 6, 6, 6]
    for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
        xf = np.log(np.array(x, dtype=dt))
        yf = np.log(np.array(y, dtype=dt))
        zf = np.log(np.array(z, dtype=dt))
        assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
Example #16
Source File: core.py From Gun-Detector with Apache License 2.0

def rdp_pure_eps(logq, pure_eps, orders):
    """Computes the RDP value given logq and pure privacy eps.

    Implementation of https://arxiv.org/abs/1610.05755, Theorem 3.

    The bound used is the min of three terms. The first term is from
    https://arxiv.org/pdf/1605.02065.pdf.
    The second term is based on the fact that when an event has
    probability (1 - q) for q close to zero, q can only change by
    exp(eps), which corresponds to a much smaller multiplicative change
    in (1 - q).
    The third term comes directly from the privacy guarantee.

    Args:
      logq: Natural logarithm of the probability of a non-optimal outcome.
      pure_eps: eps parameter for DP
      orders: array_like list of moments to compute.

    Returns:
      Array of upper bounds on rdp (a scalar if orders is a scalar).
    """
    orders_vec = np.atleast_1d(orders)
    q = math.exp(logq)
    log_t = np.full_like(orders_vec, np.inf)
    if q <= 1 / (math.exp(pure_eps) + 1):
        logt_one = math.log1p(-q) + (
            math.log1p(-q) - _log1mexp(pure_eps + logq)) * (
                orders_vec - 1)
        logt_two = logq + pure_eps * (orders_vec - 1)
        log_t = np.logaddexp(logt_one, logt_two)

    ret = np.minimum(
        np.minimum(0.5 * pure_eps * pure_eps * orders_vec,
                   log_t / (orders_vec - 1)), pure_eps)
    if np.isscalar(orders):
        return np.asscalar(ret)
    else:
        return ret
Example #17
Source File: evaluate_contact_prediction_metrics.py From tape-neurips2019 with MIT License

def sigmoid(x):
    """1-D Sigmoid that is more stable."""
    return math.exp(-np.logaddexp(0, -x))
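The identity behind this is sigmoid(x) = 1 / (1 + exp(-x)) = exp(-log(1 + exp(-x))) = exp(-logaddexp(0, -x)), which never overflows for large |x|. A minimal sketch comparing the two forms:

import math
import numpy as np

def stable_sigmoid(x):
    return math.exp(-np.logaddexp(0, -x))

print(stable_sigmoid(-1000.0))        # 0.0, computed without overflow
print(1.0 / (1.0 + np.exp(1000.0)))   # naive form: emits an overflow warning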
Example #18
Source File: test_softmax_cross_entropy.py From chainer with MIT License

def expected_forward_with_reduce(self, x_data, t_data, class_weight):
    # Compute expected value
    loss_expect = 0.0
    count = 0
    x = numpy.rollaxis(x_data, 1, x_data.ndim).reshape(
        (t_data.size, x_data.shape[1]))
    t = t_data.ravel()
    for xi, ti in six.moves.zip(x, t):
        if ti == -1:
            continue
        log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
        if class_weight is None:
            loss_expect -= (xi - log_z)[ti]
        else:
            loss_expect -= (xi - log_z)[ti] * class_weight[ti]
        count += 1

    if self.normalize:
        if count == 0:
            loss_expect = 0.0
        else:
            loss_expect /= count
    else:
        if len(t_data) == 0:
            loss_expect = 0.0
        else:
            loss_expect /= len(t_data)
    return numpy.asarray(loss_expect, dtype=x.dtype)
Example #19
Source File: test_log_softmax.py From chainer with MIT License

def forward_expected(self, inputs):
    x, = inputs
    log_z = numpy.ufunc.reduce(
        numpy.logaddexp, x, axis=self.axis, keepdims=True)
    y_expect = x - log_z
    return y_expect,
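numpy.ufunc.reduce applied to numpy.logaddexp is a logsumexp reduction, so x - log_z is exactly log-softmax. A minimal sketch:

import numpy as np

x = np.array([[1.0, 2.0, 3.0]])
log_z = np.logaddexp.reduce(x, axis=-1, keepdims=True)  # logsumexp per row
log_softmax = x - log_z
print(np.exp(log_softmax).sum(axis=-1))                 # [1.0]: valid softmax rows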
Example #20
Source File: test_negative_sampling.py From chainer with MIT License

def test_forward(self, backend_config):
    x_data = backend_config.get_array(self.x)
    t_data = backend_config.get_array(self.t)
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data, requires_grad=False)
    link = self.create_link()
    link.to_device(backend_config.device)

    y, samples = link(x, t, reduce=self.reduce, return_samples=True)
    self.assertEqual(y.shape, self.gy.shape)

    cpu_device = CpuDevice()
    W = cpu_device.send(link.W.data)
    samples = cpu_device.send(samples)

    loss = numpy.empty((len(self.x),), self.dtype)
    for i in range(len(self.x)):
        ix = self.x[i]
        it = self.t[i]
        if it == -1:
            loss[i] = 0
        else:
            w = W[samples[i]]
            f = w.dot(ix)
            # first one is positive example
            f[0] *= -1
            loss[i] = numpy.logaddexp(f, 0).sum()

    if self.reduce == 'sum':
        loss = loss.sum()

    testing.assert_allclose(y.data, loss, **self.test_forward_options)
Example #21
Source File: logistic_reg.py From particles with MIT License

def logpyt(self, theta, t):
    # log-likelihood factor t, for given theta
    lin = np.matmul(theta['beta'], data[t, :])
    return -np.logaddexp(0., -lin)

# algorithms
# N and values of M set above according to dataset
Example #22
Source File: stt_metric.py From training_results_v0.6 with Apache License 2.0

def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1,
             big_num=1e10):
    label_ = [0, 0]
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)
    l = len(label)
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)
    l_ = 2 * l + 1
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]
    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])

# label is done with remove_blank
# pred is got from pred_best
Example #23
Source File: math_ops.py From trax with Apache License 2.0

def logaddexp(x1, x2):
    amax = maximum(x1, x2)
    delta = x1 - x2
    return array_ops.where(
        isnan(delta),
        x1 + x2,  # NaNs or infinities of the same sign.
        amax + log1p(exp(-abs(delta))))
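This mirrors the standard stable formula max(x1, x2) + log1p(exp(-|x1 - x2|)); the where(isnan(delta), x1 + x2, ...) branch covers NaN inputs and equal infinities, both of which make delta NaN. A plain-numpy sketch of the same identity (not the trax code itself):

import numpy as np

def logaddexp_ref(x1, x2):
    amax = np.maximum(x1, x2)
    delta = x1 - x2
    return np.where(np.isnan(delta),
                    x1 + x2,  # NaN inputs, or infinities of the same sign
                    amax + np.log1p(np.exp(-np.abs(delta))))

print(logaddexp_ref(np.inf, np.inf))                     # inf, not nan
print(logaddexp_ref(2.0, 3.0), np.logaddexp(2.0, 3.0))   # both ~3.3133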