Python autograd.numpy.maximum() Examples

The following are 27 code examples of autograd.numpy.maximum(), collected from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions/classes of the module autograd.numpy.
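A minimal sketch of the common pattern (toy code, not taken from any of the projects below): autograd.numpy.maximum computes the same result as numpy.maximum, and autograd routes the gradient to whichever argument was selected in the forward pass.

import autograd.numpy as np
from autograd import grad

def hinge(x):
    # max(0, x - 1): gradient is 1 past the hinge, 0 before it
    return np.maximum(0.0, x - 1.0)

print(grad(hinge)(2.0))  # 1.0
print(grad(hinge)(0.5))  # 0.0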
Example #1
Source File: likelihood.py    From momi2 with GNU General Public License v3.0
def _composite_log_likelihood(data, demo, mut_rate=None, truncate_probs=0.0, vector=False, p_missing=None, use_pairwise_diffs=False, **kwargs):
    try:
        sfs = data.sfs
    except AttributeError:
        sfs = data

    sfs_probs = np.maximum(expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
                           truncate_probs)
    log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)

    # add on log likelihood of poisson distribution for total number of SNPs
    if mut_rate is not None:
        log_lik = log_lik + \
            _mut_factor(sfs, demo, mut_rate, vector,
                        p_missing, use_pairwise_diffs)

    if not vector:
        log_lik = np.squeeze(log_lik)
    return log_lik 
Example #2
Source File: geometry.py    From AeroSandbox with MIT License
def get_thickness_at_chord_fraction_legacy(self, chord_fraction):
        # Returns the (interpolated) thickness at a given location(s). The location is specified by the chord fraction, as measured from the leading edge. Thickness is nondimensionalized by chord (i.e. this function returns t/c at a given x/c).
        chord = np.max(self.coordinates[:, 0]) - np.min(
            self.coordinates[:, 0])  # This should always be 1, but this is just coded for robustness.

        x = chord_fraction * chord + min(self.coordinates[:, 0])

        upperCoors = self.upper_coordinates()
        lowerCoors = self.lower_coordinates()

        y_upper_func = sp_interp.interp1d(x=upperCoors[:, 0], y=upperCoors[:, 1], copy=False, fill_value='extrapolate')
        y_lower_func = sp_interp.interp1d(x=lowerCoors[:, 0], y=lowerCoors[:, 1], copy=False, fill_value='extrapolate')

        y_upper = y_upper_func(x)
        y_lower = y_lower_func(x)

        thickness = np.maximum(y_upper - y_lower, 0)

        return thickness 
Example #3
Source File: util.py    From kernel-gof with MIT License
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project 
    Z so that all points in Z live in the bounds.

    Z: m x d 
    Data: n x d

    Return a projected Z of size m x d.
    """
    n, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], n, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], n, axis=0)

    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z 
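A hypothetical usage sketch (arrays invented for illustration): points in Z that fall outside the per-dimension range of Data are clipped to that range.

import autograd.numpy as np

Z = np.array([[2.0, -3.0], [0.5, 0.5]])
Data = np.array([[0.0, 0.0], [1.0, 1.0]])
print(bound_by_data(Z, Data))  # [[1.  0. ]
                               #  [0.5 0.5]]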
Example #4
Source File: util.py    From kernel-gof with MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
Example #5
Source File: eucl_cones_model.py    From hyperbolic_cones with Apache License 2.0
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
        norm_parents = np.linalg.norm(parent_vectors, axis=1)
        norms_other = np.linalg.norm(other_vectors, axis=1)
        euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6)  # floor at 1e-6, since a parent can coincide with its child in the reconstruction experiment

        if not rel_reversed:
            cos_angles_child = (norms_other**2 - norm_parents**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_parents) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norm_parents) # scalar
        else:
            cos_angles_child = (norm_parents**2 - norms_other**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_other) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norms_other) # 1 + neg_size

        assert not np.isnan(cos_angles_child).any()
        clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
        clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        return np.maximum(0, angles_child - angles_psi_parent) 
Example #6
Source File: util.py    From momi2 with GNU General Public License v3.0
def truncate0(x, axis=None, strict=False, tol=1e-13):
    '''make sure everything in x is non-negative'''
    # the maximum along axis
    maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
    # the negative part of minimum along axis
    mins = np.maximum(-np.amin(x, axis=axis), 0.0)

    # assert the negative numbers are small (relative to maxes)
    assert np.all(mins <= tol * maxes)

    if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        # index with a tuple: indexing with a list of slices is no longer allowed in NumPy
        mins = mins[tuple(idx)]
        maxes = maxes[tuple(idx)]

    if strict:
        # set everything below the tolerance to 0
        return set0(x, x < tol * maxes)
    else:
        # set everything of the same magnitude as the most negative number to 0
        return set0(x, x < 2 * mins) 
Example #7
Source File: poincare_model.py    From hyperbolic_cones with Apache License 2.0
def _maxmargin_loss_fn(poincare_dists, maxmargin_margin):
        """
        Parameters
        ----------
        poincare_dists : numpy.array
            All distances d(u,v) and d(u,v'), where v' is a negative example. Shape (1 + negative_size,).

        Returns
        -------
        float
            The max-margin loss: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
        """
        positive_term = poincare_dists[0]
        negative_terms = poincare_dists[1:]
        return grad_np.maximum(0, maxmargin_margin + positive_term - negative_terms).sum() 
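Treating the function above as a staticmethod (the missing self suggests it is one), a hypothetical call with invented distances: the first entry is d(u,v) for the positive pair, the remaining entries are d(u,v') for negatives.

import autograd.numpy as grad_np

dists = grad_np.array([0.5, 2.0, 0.8])
print(_maxmargin_loss_fn(dists, maxmargin_margin=1.0))
# max(0, 1.0 + 0.5 - 2.0) + max(0, 1.0 + 0.5 - 0.8) = 0.0 + 0.7 = 0.7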
Example #8
Source File: activations.py    From MLAlgorithms with MIT License
def leakyrelu(z, a=0.01):
    return np.maximum(z * a, z) 
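Because leakyrelu is written entirely in terms of autograd.numpy.maximum, it can be differentiated directly; a small sketch (autograd's grad is assumed here, it is not imported in the original file):

from autograd import grad

d_leakyrelu = grad(leakyrelu)
print(d_leakyrelu(3.0))   # 1.0:  z > z * a, so the identity branch is selected
print(d_leakyrelu(-3.0))  # 0.01: z * a > z, so the leaky branch is selected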
Example #9
Source File: activations.py    From MLAlgorithms with MIT License
def relu(z):
    return np.maximum(0, z) 
Example #10
Source File: test_numpy.py    From autograd with MIT License
def test_maximum_equal_values():
    def fun(x): return np.maximum(x, x)
    check_grads(fun)(1.0) 
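The test above exercises the tie case x == x. As far as I can tell from autograd's VJP for maximum, the gradient at a tie is split evenly between the tied arguments, so the two halves recombine to the expected total:

import autograd.numpy as np
from autograd import grad

def fun(x):
    return np.maximum(x, x)

print(grad(fun)(1.0))  # 1.0: each tied argument contributes 0.5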
Example #11
Source File: test_systematic.py    From autograd with MIT License
def test_maximum(): combo_check(np.maximum, [0, 1])(
                               [R(1), R(1, 4), R(3, 4)],
                               [R(1), R(1, 4), R(3, 4)])
Example #12
Source File: generative_adversarial_net.py    From autograd with MIT License
def relu(x):       return np.maximum(0, x) 
Example #13
Source File: ode_net.py    From autograd with MIT License
def nn_predict(inputs, t, params):
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = np.maximum(0, outputs)
    return outputs 
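A hypothetical usage sketch (shapes and values invented): params is a list of (weights, biases) pairs, and every layer except the last is followed by a ReLU. The t argument is unused in the body; it presumably matches the signature the ODE solver expects.

import autograd.numpy as np

rng = np.random.RandomState(0)
params = [(rng.randn(3, 8), rng.randn(8)),   # layer 1: 3 -> 8
          (rng.randn(8, 2), rng.randn(2))]   # layer 2: 8 -> 2
inputs = rng.randn(5, 3)
print(nn_predict(inputs, t=0.0, params=params).shape)  # (5, 2)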
Example #14
Source File: variational_autoencoder.py    From autograd with MIT License
def relu(x):    return np.maximum(0, x) 
Example #15
Source File: beta_geo_fitter.py    From lifetimes with MIT License
def conditional_probability_alive_matrix(
        self, 
        max_frequency=None, 
        max_recency=None
    ):
        """
        Compute the probability alive matrix.

        Uses the ``conditional_probability_alive()`` method to calculate the matrix.

        Parameters
        ----------
        max_frequency: float, optional
            the maximum frequency to plot. Default is max observed frequency.
        max_recency: float, optional
            the maximum recency to plot. This also determines the age of the
            customer. Defaults to the max observed age.

        Returns
        -------
        matrix:
            A matrix of the form [t_x: historical recency, x: historical frequency]
        """

        max_frequency = max_frequency or int(self.data["frequency"].max())
        max_recency = max_recency or int(self.data["T"].max())

        return np.fromfunction(
            self.conditional_probability_alive, (max_frequency + 1, max_recency + 1), T=max_recency
        ).T 
Example #16
Source File: beta_geo_fitter.py    From lifetimes with MIT License
def conditional_probability_alive(
        self, 
        frequency, 
        recency, 
        T
    ):
        """
        Compute conditional probability alive.

        Compute the probability that a customer with history
        (frequency, recency, T) is currently alive.

        From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

        Parameters
        ----------
        frequency: array or scalar
            historical frequency of customer.
        recency: array or scalar
            historical recency of customer.
        T: array or scalar
            age of the customer.

        Returns
        -------
        array
            value representing a probability
        """

        r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")

        log_div = (r + frequency) * np.log((alpha + T) / (alpha + recency)) + np.log(
            a / (b + np.maximum(frequency, 1) - 1)
        )

        return np.atleast_1d(np.where(frequency == 0, 1.0, expit(-log_div))) 
Example #17
Source File: beta_geo_fitter.py    From lifetimes with MIT License
def _negative_log_likelihood(
        log_params, 
        freq, 
        rec, 
        T, 
        weights, 
        penalizer_coef
    ):
        """
        This method calculates the *log-likelihood* using the approach
        specified in section 7 of [2]_. More information can also be found in [3]_.

        References
        ----------
        .. [2] Fader, Peter S., Bruce G.S. Hardie, and Ka Lok Lee (2005a),
        "Counting Your Customers the Easy Way: An Alternative to the
        Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
        .. [3] http://brucehardie.com/notes/004/
        """

        warnings.simplefilter(action="ignore", category=FutureWarning)

        params = np.exp(log_params)
        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * np.log(alpha)
        A_2 = gammaln(a + b) + gammaln(b + freq) - gammaln(b) - gammaln(a + b + freq)
        A_3 = -(r + freq) * np.log(alpha + T)
        A_4 = np.log(a) - np.log(b + np.maximum(freq, 1) - 1) - (r + freq) * np.log(rec + alpha)

        penalizer_term = penalizer_coef * sum(params ** 2)
        ll = weights * (A_1 + A_2 + np.log(np.exp(A_3) + np.exp(A_4) * (freq > 0)))

        return -ll.sum() / weights.sum() + penalizer_term 
Example #18
Source File: poincare_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._compute_distances()

        if self.loss_type == 'nll':
            # NLL loss from the NIPS paper.
            exp_negative_distances = np.exp(-self.poincare_dists)  # (1 + neg_size, batch_size)
            # Remove the value for the true edge (u,v) from the partition function
            Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
            self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
            self.Z = Z # (batch_size)

            self.pos_loss = self.poincare_dists[0].sum()
            self.neg_loss = np.log(self.Z).sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'neg':
            # NEG loss function:
            # - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
            positive_term = np.log(1.0 + np.exp((- self.neg_r + self.poincare_dists[0]) / self.neg_t))  # (batch_size)
            negative_terms = self.neg_mu * \
                             np.log(1.0 + np.exp((self.neg_r - self.poincare_dists[1:]) / self.neg_t)) # (1 + neg_size, batch_size)

            self.pos_loss = positive_term.sum()
            self.neg_loss = negative_terms.sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'maxmargin':
            # max-margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
            self.loss = np.maximum(0, self.maxmargin_margin + self.poincare_dists[0] - self.poincare_dists[1:]).sum() # scalar
            self.pos_loss = self.loss
            self.neg_loss = self.loss

        else:
            raise ValueError('Unknown loss type : ' + self.loss_type)

        self._loss_computed = True 
Example #19
Source File: order_emb_model.py    From hyperbolic_cones with Apache License 2.0
def is_a_scores_vector_batch(self, alpha, parent_vectors, other_vectors, rel_reversed):
        if not rel_reversed:
            return np.linalg.norm(np.maximum(0, parent_vectors - other_vectors), axis=1)
        else:
            return np.linalg.norm(np.maximum(0, - parent_vectors + other_vectors), axis=1) 
Example #20
Source File: order_emb_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        if not self.rels_reversed:
            self.entailment_penalty = np.maximum(0, self.vectors_u - self.vectors_v) # (1 + negative_size, dim, batch_size).
        else:
            self.entailment_penalty = np.maximum(0, - self.vectors_u + self.vectors_v) # (1 + negative_size, dim, batch_size).

        self.energy_vec = np.linalg.norm(self.entailment_penalty, axis=1)**2 # (1 + negative_size, batch_size).
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example #21
Source File: order_emb_model.py    From hyperbolic_cones with Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]
        if not rels_reversed:
            entailment_penalty = grad_np.maximum(0, vector_u - vectors_v) # (1 + negative_size, dim).
        else:
            entailment_penalty = grad_np.maximum(0, - vector_u + vectors_v) # (1 + negative_size, dim).

        energy_vec = grad_np.linalg.norm(entailment_penalty, axis=1) ** 2
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example #22
Source File: hyp_cones_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)

        self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
        self.g_sqrt = np.sqrt(self.g)

        self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

        if not self.rels_reversed:
            # u is x , v is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)

        else:
            # v is x , u is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example #23
Source File: eucl_cones_model.py    From hyperbolic_cones with Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        euclidean_dists_sq = self.euclidean_dists ** 2

        if not self.rels_reversed:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_v_sq - self.norms_u_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K / self.norms_u) # (1, batch_size)

        else:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_u_sq - self.norms_v_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K / self.norms_v) # (1 + neg_size, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example #24
Source File: optimizers.py    From momi2 with GNU General Public License v3.0
def sgd(fun, x0, fun_and_jac, pieces, stepsize, num_iters, bounds=None, callback=None, iter_per_output=10, rgen=np.random):
    x0 = np.array(x0)

    if callback is None:
        callback = lambda *a, **kw: None

    if bounds is None:
        bounds = [(None, None) for _ in x0]
    lower, upper = zip(*bounds)
    lower = [-float('inf') if l is None else l
             for l in lower]
    upper = [float('inf') if u is None else u
             for u in upper]

    def truncate(x):
        return np.maximum(np.minimum(x, upper), lower)

    x = x0
    for nit in range(num_iters):
        i = rgen.randint(pieces)
        f_x, g_x = fun_and_jac(x, i)
        x = truncate(x - stepsize * g_x)
        if nit % iter_per_output == 0:
            callback(x, f_x, nit)

    return scipy.optimize.OptimizeResult({'x': x, 'fun': f_x, 'jac': g_x}) 
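A hypothetical usage sketch (the quadratic pieces are invented): each iteration sees one randomly chosen piece, so the iterates drift toward the mean of the centers.

import autograd.numpy as np

centers = np.array([1.0, 2.0, 3.0])

def fun_and_jac(x, i):
    # value and gradient of the i-th piece, (x - c_i)^2
    return np.sum((x - centers[i]) ** 2), 2.0 * (x - centers[i])

res = sgd(None, x0=[0.0], fun_and_jac=fun_and_jac, pieces=3,
          stepsize=0.05, num_iters=500)
print(res.x)  # hovers near 2.0, the mean of the centers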
Example #25
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def avg_pairwise_hets(self):
        # avg number of hets per ind per pop (assuming Hardy-Weinberg)
        n_nonmissing = np.sum(self.configs.value, axis=2)
        # for the denominator, assume 1 allele is drawn from the whole sample, and 1
        # allele is drawn only from non-missing alleles
        denoms = np.maximum(n_nonmissing * (self.sampled_n - 1), 1.0)
        p_het = 2 * self.configs.value[:, :, 0] * \
            self.configs.value[:, :, 1] / denoms

        return self.freqs_matrix.T.dot(p_het) 
Example #26
Source File: hyp_cones_model.py    From hyperbolic_cones with Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
        dot_prod = (vector_u * vectors_v).sum(axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (dot_prod * (1 + norm_u ** 2) - norm_u ** 2 * (1 + norms_v ** 2)) /\
                              (norm_u * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norm_u**2) / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (dot_prod * (1 + norms_v ** 2) - norms_v ** 2 * (1 + norm_u ** 2)) /\
                              (norms_v * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norms_v**2) / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example #27
Source File: eucl_cones_model.py    From hyperbolic_cones with Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (norms_v**2 - norm_u**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_u) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (norm_u**2 - norms_v**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_v) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum()