Python scipy.sqrt() Examples

The following are 30 code examples of scipy.sqrt(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy, or try the search function.
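Note that scipy.sqrt() was simply a re-export of numpy.sqrt(); the alias has been deprecated and removed from recent SciPy releases, so new code should call NumPy directly. A minimal sketch:

import numpy as np

print(np.sqrt(2.0))                # 1.4142135623730951
print(np.sqrt([1.0, 4.0, 9.0]))    # [1. 2. 3.]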
Example #1
Source File: c14_16_up_and_out_call.py    From Python-for-Finance-Second-Edition with MIT License
def up_and_out_call(s0,x,T,r,sigma,n_simulation,barrier):
    n_steps=100. 
    dt=T/n_steps 
    total=0 
    for j in sp.arange(0, n_simulation): 
        sT=s0 
        out=False
        for i in range(0,int(n_steps)): 
            e=sp.random.normal() 
            sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt)) 
            if sT>barrier: 
               out=True 
        if out==False: 
            total+=bsCall(s0,x,T,r,sigma) 
    return total/n_simulation 
# 
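A hypothetical driver for the snippet above, assuming it is pasted into one script together with the bsCall() helper from Example #24, and with sp bound to NumPy as a drop-in for the removed scipy aliases:

import numpy as sp               # provides sp.arange, sp.exp, sp.sqrt, sp.random.normal

sp.random.seed(42)               # make the Monte Carlo run reproducible
price = up_and_out_call(s0=100., x=100., T=0.5, r=0.05,
                        sigma=0.2, n_simulation=1000, barrier=150.)
print('up-and-out call value: %.4f' % price)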
Example #2
Source File: sum_stats_parsers.py    From ldpred with MIT License
def get_beta(pval_read, raw_beta, beta_read, line_dict, header_dict, se, 
             z_from_se, N, eff_type, se_inferred_zscores):
    if eff_type=='BLUP':
        return raw_beta
    if z_from_se:
        assert se in header_dict, "SE was not specified in summary statistics provided, which is necessary when the 'z_from_se' flag is used."
        se_read = float(line_dict[header_dict[se]])
        if se_read==0 or not isfinite(se_read):
            return None
        se_inferred_zscores += 1
        return get_beta_from_se(beta_read, se_read, eff_type, raw_beta, N)
    else:
        if pval_read==0 or not isfinite(stats.norm.ppf(pval_read)):
            #Attempt to Parse SEs to infer Z-score
            if not se in header_dict:
                return None
            else:
                se_read = float(line_dict[header_dict[se]])
                if se_read==0 or not isfinite(se_read):
                    return None

                se_inferred_zscores += 1
                return get_beta_from_se(beta_read, se_read, eff_type, raw_beta, N)
        else:               
            return sp.sign(raw_beta) * stats.norm.ppf(pval_read / 2.0)/ sp.sqrt(N) 
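For reference, the final return statement converts a two-sided p-value and the sign of the reported effect into a standardized effect on the sqrt(N) scale. A standalone sketch of that conventional transformation (a hypothetical helper, not part of ldpred):

import numpy as np
from scipy import stats

def pval_to_std_beta(pval, raw_beta, N):
    z = -stats.norm.ppf(pval / 2.0)            # |z| for a two-sided p-value
    return np.sign(raw_beta) * z / np.sqrt(N)  # re-sign, scale by sqrt(N)

print(pval_to_std_beta(1e-8, raw_beta=0.12, N=50000))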
Example #3
Source File: admm.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def compute_residuals(self):
        """Compute residuals and stopping thresholds."""

        if self.opt['AutoRho', 'StdResiduals']:
            r = linalg.norm(self.rsdl_r(self.AXnr, self.Y))
            s = linalg.norm(self.rsdl_s(self.Yprev, self.Y))
            epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                self.rsdl_rn(self.AXnr, self.Y)*self.opt['RelStopTol']
            edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                self.rsdl_sn(self.U)*self.opt['RelStopTol']
        else:
            rn = self.rsdl_rn(self.AXnr, self.Y)
            if rn == 0.0:
                rn = 1.0
            sn = self.rsdl_sn(self.U)
            if sn == 0.0:
                sn = 1.0
            r = linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
            s = linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
            epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua 
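The thresholds above follow the standard ADMM stopping criteria of Boyd et al.; a generic, self-contained sketch of those formulas on plain NumPy vectors (names are illustrative, not part of alphacsc):

import numpy as np

def admm_thresholds(Ax, Bz, c, ATy, abs_tol=1e-4, rel_tol=1e-3):
    # eps_pri  = sqrt(p) * abs_tol + rel_tol * max(||Ax||, ||Bz||, ||c||)
    # eps_dual = sqrt(n) * abs_tol + rel_tol * ||A^T y||
    p, n = Ax.size, ATy.size
    eps_pri = np.sqrt(p) * abs_tol + rel_tol * max(
        np.linalg.norm(Ax), np.linalg.norm(Bz), np.linalg.norm(c))
    eps_dual = np.sqrt(n) * abs_tol + rel_tol * np.linalg.norm(ATy)
    return eps_pri, eps_dual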
Example #4
Source File: BWRS.py    From pychemqt with GNU General Public License v3.0
def _fug(self, Z, xi):
        rho=self.P.atm/Z/R_atml/self.T
        tita=[]
        for i in range(len(self.componente)):
            suma=0
            for j in range(len(self.componente)):
                suma+=xi[j]*(-self.Ao**0.5*self.Aoi[i]**0.5*(1-self.kij[i][j]) \
                                    -  self.Co**0.5*self.Coi[i]**0.5*(1-self.kij[i][j])**3/self.T**2 \
                                    + self.Do**0.5*self.Doi[i]**0.5*(1-self.kij[i][j])**4/self.T**3 \
                                    -  self.Eo**0.5*self.Eoi[i]**0.5*(1-self.kij[i][j])**5/self.T**4)
#                print suma
            lo=R_atml*self.T*log(rho*R_atml*self.T*xi[i]) \
                    + rho*(self.Bo+self.Boi[i])*R_atml*self.T \
                    + 2*rho*suma \
                    + rho**2/2*(3*(self.b**2*self.bi[i])**(1./3)*R_atml*self.T-3*(self.a**2*self.ai[i])**(1./3)-3*(self.d**2*self.di[i])**(1./3)/self.T) \
                    + self.alfa*rho**5/5*(3*(self.a**2*self.ai[i])**(1./3)+3*(self.d**2*self.di[i])**(1./3)/self.T) \
                    + 3*rho**5/5*(self.a+self.d/self.T)*(self.alfa**2*self.alfai[i])**(1./3) \
                    + 3*(self.c**2*self.ci[i])**(1./3)*rho**2/self.T**2*((1-exp(-self.gamma*rho**2))/self.gamma/rho**2-exp(-self.gamma*rho**2)/2) \
                    - (2*self.c*sqrt(self.gammai[i]/self.gamma)**0.5/self.gamma/self.T**2)*((1-exp(-self.gamma*rho**2))*(1+self.gamma*rho**2+self.gamma**2*rho**4/2))
            tita.append(exp(lo/R_atml/self.T))
        return tita 
Example #5
Source File: LDpred_fast.py    From ldpred with MIT License
def ldpred_fast(betas_blup, h2=None, Ns=None, p_c=None):
    """
    LDpred-fast
    
    Joel Mefford's sparsified BLUP shrink. 
    """
    M = len(betas_blup)
    h2mn = h2+M/Ns
    h2mpn = h2+(M*p_c)/Ns
    C1 = h2mn/h2mpn
    C2 = h2/h2mn
    V_nc = (C2/Ns)*(1-C2*h2)
    V_c = C2*(h2/(M*p_c))+V_nc
    P_beta_ncaus = stats.norm.pdf(betas_blup,scale=sp.sqrt(V_nc))
    P_beta_caus = stats.norm.pdf(betas_blup,scale=sp.sqrt(V_c))
    P_caus_beta =  P_beta_caus *p_c/(P_beta_caus *p_c + P_beta_ncaus *(1-p_c))
    beta_jms = betas_blup * C1 * P_caus_beta 
    return beta_jms 
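A hypothetical run of the shrink above, assuming the snippet is placed in a script where sp and stats are bound as below (the inputs are made up):

import numpy as sp               # stand-in for the removed scipy NumPy aliases
from scipy import stats

betas_blup = sp.random.normal(scale=0.01, size=1000)
shrunk = ldpred_fast(betas_blup, h2=0.5, Ns=100000, p_c=0.01)
print(shrunk[:5])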
Example #6
Source File: c12_19_up_and_out_call.py    From Python-for-Finance-Second-Edition with MIT License
def up_and_out_call(s0,x,T,r,sigma,n_simulation,barrier):
    n_steps=100. 
    dt=T/n_steps 
    total=0 
    for j in sp.arange(0, n_simulation): 
        sT=s0 
        out=False
        for i in range(0,int(n_steps)): 
            e=sp.random.normal() 
            sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt)) 
            if sT>barrier: 
               out=True 
        if out==False: 
            total+=bsCall(s0,x,T,r,sigma) 
    return total/n_simulation 
# 
Example #7
Source File: gp.py    From GPPVAE with Apache License 2.0
def generate_data(N, S, L):

        # generate genetics
        G = 1.0 * (sp.rand(N, S) < 0.2)
        G -= G.mean(0)
        G /= G.std(0) * sp.sqrt(G.shape[1])

        # generate latent phenotypes
        Zg = sp.dot(G, sp.randn(G.shape[1], L))
        Zn = sp.randn(N, L)

        # generate variance explained
        vg = sp.linspace(0.8, 0, L)

        # rescale and sum
        Zg *= sp.sqrt(vg / Zg.var(0))
        Zn *= sp.sqrt((1 - vg) / Zn.var(0))
        Z = Zg + Zn

        return Z, G 
Example #8
Source File: c10_37_volatility_smile.py    From Python-for-Finance-Second-Edition with MIT License
def implied_vol_call_min(S,X,T,r,c): 
    from scipy import log,exp,sqrt,stats 
    implied_vol=1.0
    min_value=1000
    for i in range(10000): 
        sigma=0.0001*(i+1)
        d1=(log(S/X)+(r+sigma*sigma/2.)*T)/(sigma*sqrt(T)) 
        d2 = d1-sigma*sqrt(T)
        c2=S*stats.norm.cdf(d1)-X*exp(-r*T)*stats.norm.cdf(d2) 
        abs_diff=abs(c2-c)
        if abs_diff<min_value: 
            min_value=abs_diff 
            implied_vol=sigma 
            k=i
    return implied_vol


# Step 3: get call option data 
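A hypothetical quote for the grid search above (the in-function from scipy import log, exp, sqrt, stats only works on older SciPy releases that still re-export the NumPy functions):

iv = implied_vol_call_min(S=40., X=40., T=0.5, r=0.05, c=2.56)
print('implied volatility ~ %.4f' % iv)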
Example #9
Source File: instrument_model.py    From isofit with Apache License 2.0
def _column_covariances(X, uniformity_thresh):
    """."""

    Xvert = _high_frequency_vert(X, sigma=4.0)
    Xvertp = _high_frequency_vert(X, sigma=3.0)
    models = []
    use_C = []
    for i in range(X.shape[2]):
        xsub = Xvert[:, :, i]
        xsubp = Xvertp[:, :, i]
        mu = xsub.mean(axis=0)
        dists = s.sqrt(pow((xsub - mu), 2).sum(axis=1))
        distsp = s.sqrt(pow((xsubp - mu), 2).sum(axis=1))
        thresh = _percentile(dists, 95.0)
        uthresh = dists * uniformity_thresh
        #use       = s.logical_and(dists<thresh, abs(dists-distsp) < uthresh)
        use = dists < thresh
        C = s.cov(xsub[use, :], rowvar=False)
        [U, V, D] = svd(C)
        V[V < 1e-8] = 1e-8
        C = U.dot(s.diagflat(V)).dot(D)
        models.append(C)
        use_C.append(use)
    return s.array(models), Xvert, Xvertp, s.array(use_C).T 
Example #10
Source File: c13_08_KMF_function.py    From Python-for-Finance-Second-Edition with MIT License
def KMV_f(E,D,T,r,sigmaE):
    n=10000
    m=2000
    diffOld=1e6     # a very big number
    for i in sp.arange(1,10):
        for j in sp.arange(1,m):
            A=E+D/2+i*D/n
            sigmaA=0.05+j*(1.0-0.001)/m
            d1 = (log(A/D)+(r+sigmaA*sigmaA/2.)*T)/(sigmaA*sqrt(T))
            d2 = d1-sigmaA*sqrt(T)
            diff4E= (A*N(d1)-D*exp(-r*T)*N(d2)-E)/A  # scale by assets
            diff4A= A/E*N(d1)*sigmaA-sigmaE          # a small number already
            diffNew=abs(diff4E)+abs(diff4A)
            if diffNew<diffOld:
               diffOld=diffNew
               output=(round(A,2),round(sigmaA,4),round(diffNew,5))
    return output
# 
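A hypothetical call for the KMV iteration above; the snippet relies on module-level names from the book's chapter, which can be bound along these lines:

import numpy as sp               # sp.arange is all the snippet needs from sp
from numpy import log, exp, sqrt
from scipy import stats
N = stats.norm.cdf               # the snippet's N() is the standard normal CDF

A, sigmaA, diff = KMV_f(E=110000., D=100000., T=1., r=0.05, sigmaE=0.2)
print(A, sigmaA, diff)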
Example #11
Source File: c14_25_up_and_out_call.py    From Python-for-Finance-Second-Edition with MIT License
def up_and_out_call(s0,x,T,r,sigma,n_simulation,barrier):
    n_steps=100. 
    dt=T/n_steps 
    total=0 
    for j in sp.arange(0, n_simulation): 
        sT=s0 
        out=False
        for i in range(0,int(n_steps)): 
            e=sp.random.normal() 
            sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt)) 
            if sT>barrier: 
               out=True 
        if out==False: 
            total+=bsCall(s0,x,T,r,sigma) 
    return total/n_simulation 
# 
Example #12
Source File: c14_26_up_and_out_call2.py    From Python-for-Finance-Second-Edition with MIT License
def up_and_out_call(s0,x,T,r,sigma,n_simulation,barrier):
    n_steps=100.
    dt=T/n_steps 
    total=0
    for j in range(0, n_simulation): 
        sT=s0
        out=False
        for i in range(0,int(n_steps)): 
            e=sp.random.normal()
            sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt)) 
            if sT>barrier:
                out=True 
        if out==False:
            total+=p4f.bs_call(s0,x,T,r,sigma) 
    return total/n_simulation
# 
Example #13
Source File: c14_20_lookback_min_price_as_strike.py    From Python-for-Finance-Second-Edition with MIT License
def lookback_min_price_as_strike(s,T,r,sigma,n_simulation): 
    n_steps=100
    dt=T/n_steps
    total=0
    for j in range(n_simulation): 
        min_price=100000.	# a very big number 
        sT=s
        for i in range(int(n_steps)): 
            e=sp.random.normal()
            sT*=sp.exp((r-0.5*sigma*sigma)*dt+sigma*e*sp.sqrt(dt)) 
            if sT<min_price:
                min_price=sT
                #print 'j=',j,'i=',i,'total=',total 
        total+=p4f.bs_call(s,min_price,T,r,sigma)
    return total/n_simulation

# 
Example #14
Source File: admm.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def rsdl_rn(self, AX, Y):
        """Compute primal residual normalisation term.

        Overriding this method is required if methods :meth:`cnst_A`,
        :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
        overridden.
        """

        if not hasattr(self, '_cnst_nrm_c'):
            self._cnst_nrm_c = np.sqrt(linalg.norm(self.cnst_c0())**2 +
                                       linalg.norm(self.cnst_c1())**2)
        return max((linalg.norm(AX), linalg.norm(Y), self._cnst_nrm_c)) 
Example #15
Source File: healmap.py    From astrolibpy with GNU General Public License v3.0
def _sigma(z):
		if z < 0.:
			return -(2.-sc.sqrt((3.*(1+z))))

		else:
			return 2.-sc.sqrt((3.*(1-z))) 
Example #16
Source File: admm.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def rsdl_s(self, Yprev, Y):
        """Compute dual residual vector."""

        # Since s = rho A^T B (y^(k+1) - y^(k)) and B = -(I I I ...)^T,
        # the correct calculation here would involve replicating (Yprev - Y)
        # on the axis on which the blocks of X are stacked. Since this would
        # require allocating additional memory, and since it is only the norm
        # of s that is required, instead of replicating the vector it is
        # scaled to have the same l2 norm as the replicated vector
        return np.sqrt(self.Nb)*self.rho*(Yprev - Y) 
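A quick numeric check of the scaling argument in the comment above: stacking a vector k times multiplies its l2 norm by sqrt(k), so scaling by sqrt(Nb) gives the same norm without the extra memory.

import numpy as np

v = np.array([3.0, 4.0])                 # ||v|| = 5
k = 4
print(np.linalg.norm(np.tile(v, k)))     # 10.0
print(np.sqrt(k) * np.linalg.norm(v))    # 10.0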
Example #17
Source File: c14_02_chooserOption.py    From Python-for-Finance-Second-Edition with MIT License
def callAndPut(S,X,T,r,sigma,tao,type='C'):
    d1=(log(S/X)+r*T+0.5*sigma*sigma*tao)/(sigma*sqrt(tao)) 
    d2 = d1-sigma*sqrt(tao)
    if type.upper()=='C':
        c=S*stats.norm.cdf(d1)-X*exp(-r*T)*stats.norm.cdf(d2)
        return c
    else:
        p=X*exp(-r*T)*stats.norm.cdf(-d2)-S*stats.norm.cdf(-d1)
        return p
# 
Example #18
Source File: sum_stats_parsers.py    From ldpred with MIT License
def get_beta_from_se(beta_read, se_read, eff_type, raw_beta, N):
    if se_read==0:
        return 
    if eff_type=='LINREG' or eff_type=='LOGOR':
        abs_beta = sp.absolute(beta_read)/se_read
    elif eff_type=='OR':
        abs_beta = sp.absolute(1-beta_read)/se_read
    else: 
        raise Exception('Unknown effect type')      
    return sp.sign(raw_beta) * abs_beta/ sp.sqrt(N) 
Example #19
Source File: LDpred_gibbs.py    From ldpred with MIT License
def prepare_constants(ldpred_n,ns,m,p,h2,sampl_var_shrink_factor):
    Mp = m * p
    hdmp = (h2 / Mp)
    const_dict = {'Mp':Mp, 'hdmp':hdmp}
    rv_scalars = sp.zeros(m)
    if ldpred_n is not None:
        hdmpn = hdmp + 1.0 / ldpred_n
        hdmp_hdmpn = (hdmp / hdmpn)
        c_const = (p / sp.sqrt(hdmpn))
        d_const = (1.0 - p) / (sp.sqrt(1.0 / ldpred_n))
        const_dict['n']=ldpred_n
        const_dict['hdmpn']=hdmpn
        const_dict['hdmp_hdmpn']=hdmp_hdmpn
        const_dict['c_const']=c_const
        const_dict['d_const']=d_const
        rv_scalars[:]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn) * (1.0 / ldpred_n))
    else:
        snp_dict = {}
        for i in range(m):
            ni = ns[i]
            hdmpn_i = hdmp + 1.0 / ni
            hdmp_hdmpn_i = (hdmp / hdmpn_i)
            c_const_i = (p / sp.sqrt(hdmpn_i))
            d_const_i = (1.0 - p) / (sp.sqrt(1.0 / ni))
            snp_dict[i]={'n':ni, 'hdmpn':hdmpn_i, 
                           'hdmp_hdmpn':hdmp_hdmpn_i, 
                           'c_const':c_const_i, 
                           'd_const':d_const_i}
            rv_scalars[i]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn_i) * (1.0 / ni))
        const_dict['snp_dict']=snp_dict
    const_dict['rv_scalars']=rv_scalars
    return const_dict 
Example #20
Source File: gp.py    From GPPVAE with Apache License 2.0
def nll_ineff(self, X, Vs):
        vs = self.get_vs()
        V = torch.cat([torch.sqrt(vs[i]) * V for i, V in enumerate(Vs)], 1)
        K = V.mm(V.transpose(0, 1)) + vs[-1] * torch.eye(X.shape[0]).cuda()
        Uk, Shk, Vk = torch.svd(K)
        Ki = torch.inverse(K)
        # Ki = (Vk / Shk).mm(torch.transpose(Vk, 0, 1))
        Xb = Ki.mm(X)

        quad_term = torch.einsum("ij,ij->i", [X, Xb])[:, None]
        logdetK = X.shape[1] * torch.log(Shk).sum()
        nll = 0.5 * quad_term + 0.5 * logdetK / X.shape[0]

        return nll 
Example #21
Source File: gp.py    From GPPVAE with Apache License 2.0
def U_UBi_Shb(self, Vs, vs):

        # compute U and V
        V = torch.cat([torch.sqrt(vs[i]) * V for i, V in enumerate(Vs)], 1)
        U = V / torch.sqrt(vs[-1])
        eye = torch.eye(U.shape[1]).cuda()
        B = torch.mm(torch.transpose(U, 0, 1), U) + eye
        # cholB = torch.potrf(B, upper=False)
        # Bi = torch.potri(cholB, upper=False)
        Ub, Shb, Vb = torch.svd(B)
        # Bi = (Vb / Shb).mm(torch.transpose(Vb, 0, 1))
        Bi = torch.inverse(B)
        UBi = torch.mm(U, Bi)

        return U, UBi, Shb 
Example #22
Source File: util.py    From Azimuth with BSD 3-Clause "New" or "Revised" License
def ranktrafo(data):
    X = data.values[:, None]
    Is = X.argsort(axis=0)
    RV = sp.zeros_like(X)
    rank = sp.zeros_like(X)
    for i in xrange(X.shape[1]):
        x =  X[:,i]
        rank = sp.stats.rankdata(x)
        rank /= (X.shape[0]+1)
        RV[:,i] = sp.sqrt(2) * sp.special.erfinv(2*rank-1)

    return RV.flatten() 
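A Python 3 sketch of the same rank-based inverse-normal transform (the snippet above targets Python 2, hence xrange); the helper name is illustrative:

import numpy as np
from scipy.stats import rankdata
from scipy.special import erfinv

def rank_gauss(x):
    r = rankdata(x) / (len(x) + 1.0)         # map ranks into (0, 1)
    return np.sqrt(2) * erfinv(2 * r - 1)    # inverse-normal (probit) transform

print(rank_gauss(np.array([0.1, 3.2, 1.5, 0.7])))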
Example #23
Source File: array_metrics.py    From e3fp with GNU Lesser General Public License v3.0
def pearson(X, Y=None):
    """Compute the Pearson correlation between `X` and `Y`.

    Parameters
    ----------
    X : array_like or sparse matrix
        with shape (`n_fprints_X`, `n_bits`).
    Y : array_like or sparse matrix, optional
        with shape (`n_fprints_Y`, `n_bits`).

    Returns
    -------
    pearson : array of shape (`n_fprints_X`, `n_fprints_Y`)


    See Also
    --------
    soergel: Soergel similarity for non-binary data
    cosine, dice, tanimoto
    """
    X, Y = _check_array_pair(X, Y)
    Xlen = X.shape[0]
    if issparse(X):
        X = vstack((X, Y), format="csr")
        X = X - X.mean(axis=1)
        cov = (X * X.T) / (X.shape[1] - 1.0)
        d = np.sqrt(np.diag(cov))
        with np.errstate(divide="ignore"):  # handle 0 in denominator
            pearson = cov / np.outer(d, d)
    else:
        with np.errstate(divide="ignore"):  # handle 0 in denominator
            pearson = scipy.corrcoef(X, Y)
    return np.asarray(np.nan_to_num(pearson[:Xlen, Xlen:])) 
Example #24
Source File: c2_09_bsCall.py    From Python-for-Finance-Second-Edition with MIT License
def bsCall(S,X,T,r,sigma):
    from scipy import log,exp,sqrt,stats
    d1=(log(S/X)+(r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
    d2 = d1-sigma*sqrt(T)
    return S*stats.norm.cdf(d1)-X*exp(-r*T)*stats.norm.cdf(d2) 
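A hypothetical call (again, the in-function from scipy import log, exp, sqrt, stats requires an older SciPy that still re-exports the NumPy functions):

print(bsCall(S=40., X=42., T=0.5, r=0.015, sigma=0.2))   # prints the Black-Scholes call value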
Example #25
Source File: array_metrics.py    From e3fp with GNU Lesser General Public License v3.0
def _sparse_cosine(X, Y):
    Xnorm = scipy.sqrt(X.multiply(X).sum(axis=1))
    if Y is X:
        Ynorm = Xnorm
    else:
        Ynorm = scipy.sqrt(Y.multiply(Y).sum(axis=1))
    XY = (X * Y.T).toarray()
    with np.errstate(divide="ignore"):  # handle 0 in denominator
        return np.nan_to_num(XY / (Xnorm * Ynorm.T)) 
Example #26
Source File: wigner.py    From ARC-Alkali-Rydberg-Calculator with BSD 3-Clause "New" or "Revised" License
def CG(j1, m1, j2, m2, j3, m3):
    r"""
        Clebsch–Gordan (CG) coefficients

        Args:
            j1,m1,j2,m2,j3,m3: parameters of
                :math:`\langle j_1, m_1, j_2, m_2 | j_1, j_2, j_3, m_3 \rangle`

    """
    return Wigner3j(j1, j2, j3, m1, m2, -m3) \
        * sqrt(2 * j3 + 1) * (-1)**(j1 - j2 + m3) 
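An independent cross-check of one coefficient with SymPy, which uses the same (j1, m1, j2, m2, j3, m3) argument order (SymPy is not a dependency of ARC; this is just a sanity check):

from sympy import S
from sympy.physics.quantum.cg import CG as SymCG

# <1/2, 1/2; 1/2, -1/2 | 1, 0> = 1/sqrt(2)
print(SymCG(S(1)/2, S(1)/2, S(1)/2, -S(1)/2, 1, 0).doit())   # sqrt(2)/2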
Example #27
Source File: blackandscholes.py    From wallstreet with MIT License
def _BlackScholesCall(S, K, T, sigma, r, q):
            d1 = (log(S/K) + (r - q + (sigma**2)/2)*T)/(sigma*sqrt(T))
            d2 = d1 - sigma*sqrt(T)
            return S*exp(-q*T)*norm.cdf(d1) - K*exp(-r*T)*norm.cdf(d2) 
Example #28
Source File: blackandscholes.py    From wallstreet with MIT License
def _BlackScholesPut(S, K, T, sigma, r, q):
            d1 = (log(S/K) + (r - q + (sigma**2)/2)*T)/(sigma*sqrt(T))
            d2 = d1 - sigma*sqrt(T)
            return  K*exp(-r*T)*norm.cdf(-d2) - S*exp(-q*T)*norm.cdf(-d1) 
Example #29
Source File: blackandscholes.py    From wallstreet with MIT License
def _fprime(self, sigma):
        logSoverK = log(self.S/self.K)
        n12 = ((self.r + sigma**2/2)*self.T)
        numerd1 = logSoverK + n12
        d1 = numerd1/(sigma*sqrt(self.T))
        return self.S*sqrt(self.T)*norm.pdf(d1)*exp(-self.r*self.T) 
Example #30
Source File: array_metrics.py    From e3fp with GNU Lesser General Public License v3.0
def cosine(X, Y=None, assume_binary=False):
    """Compute the Cosine similarities between `X` and `Y`.

    Parameters
    ----------
    X : array_like or sparse matrix
        with shape (`n_fprints_X`, `n_bits`).
    Y : array_like or sparse matrix, optional
        with shape (`n_fprints_Y`, `n_bits`).
    assume_binary : bool, optional
        Assume data is binary (results in efficiency boost). If data is not
        binary, the result will be incorrect.

    Returns
    -------
    cosine : array of shape (`n_fprints_X`, `n_fprints_Y`)

    See Also
    --------
    dice, soergel, tanimoto
    """
    X, Y = _check_array_pair(X, Y)
    if not issparse(X):
        return 1.0 - scipy.spatial.distance.cdist(X, Y, metric="cosine")
    if assume_binary:
        Xbits, Ybits, XYbits = _get_bitcount_arrays(X, Y, return_XYbits=True)
        with np.errstate(divide="ignore"):  # handle 0 in denominator
            return np.asarray(np.nan_to_num(XYbits / np.sqrt(Xbits * Ybits.T)))
    else:
        return _sparse_cosine(X, Y)
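A hypothetical dense-input call, assuming the function above is imported together with the module's helpers (_check_array_pair, _get_bitcount_arrays, _sparse_cosine):

import numpy as np

X = np.array([[1., 0., 1., 1.],
              [0., 1., 1., 0.]])
print(cosine(X))    # 2x2 matrix of pairwise cosine similarities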