Python scipy.stats.gamma.rvs() Examples

The following are 11 code examples of scipy.stats.gamma.rvs(), taken from open source projects. The source file, project, and license are listed above each example.
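Before the project examples, here is a minimal, self-contained sketch of gamma.rvs itself. The shape argument, loc, scale, size, and random_state are standard scipy.stats keywords; the specific numbers are only illustrative.

import numpy as np
from scipy.stats import gamma

# Draw five samples from a Gamma distribution with shape a=3 and scale=2.
samples = gamma.rvs(3, loc=0.0, scale=2.0, size=5, random_state=42)
print(samples)

# With a larger sample the mean approaches shape*scale = 6.
big = gamma.rvs(3, scale=2.0, size=100000, random_state=42)
print(big.mean())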
Example #1
Source File: conviction_system_logic.py    From cadCAD with MIT License
def gen_new_proposal(network, funds, supply, total_funds, trigger_func):
    j = len([node for node in network.nodes])
    network.add_node(j)
    network.nodes[j]['type']="proposal"
    
    network.nodes[j]['conviction']=0
    network.nodes[j]['status']='candidate'
    network.nodes[j]['age']=0
    
    rescale = scale_factor*funds/total_funds
    r_rv = gamma.rvs(3,loc=0.001, scale=rescale)
    network.nodes[j]['funds_requested'] = r_rv  # use .nodes (G.node was removed in networkx 2.4)
    
    network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)
    
    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)
    
    for i in participants:
        network.add_edge(i, j)
        if i==proposing_participant:
            network.edges[(i, j)]['affinity']=1
        else:
            rv = np.random.rand()
            a_rv = 1-4*(1-rv)*rv #polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv
            
        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i,j)]['tokens'] = 0
    return network 
Example #2
Source File: conviction_system_logic_sim.py    From cadCAD with MIT License
def gen_new_proposal(network, funds, supply, trigger_func):
    j = len([node for node in network.nodes])
    network.add_node(j)
    network.nodes[j]['type']="proposal"
    
    network.nodes[j]['conviction']=0
    network.nodes[j]['status']='candidate'
    network.nodes[j]['age']=0
    
    rescale = scale_factor*funds
    r_rv = gamma.rvs(3,loc=0.001, scale=rescale)
    network.nodes[j]['funds_requested'] = r_rv  # use .nodes (G.node was removed in networkx 2.4)
    
    network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)
    
    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)
    
    for i in participants:
        network.add_edge(i, j)
        if i==proposing_participant:
            network.edges[(i, j)]['affinity']=1
        else:
            rv = np.random.rand()
            a_rv = 1-4*(1-rv)*rv #polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv
            
        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i,j)]['tokens'] = 0
    return network 
Example #3
Source File: copularnd.py    From copula-py with GNU General Public License v3.0
def _gaussian(M, Rho):
    """
    Generates samples from the Gaussian Copula, w/ dependency
    matrix described by Rho.  Rho should be a numpy square matrix.
    It is assumed that we have a 0 mean.
    """
    N = Rho.shape[0]
    mu = np.zeros(N)
    y = multivariate_normal(mu,Rho)
    mvnData = y.rvs(size=M)
    U = norm.cdf(mvnData)
    
    return U 
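A possible way to exercise _gaussian, assuming the definition above and its scipy.stats imports (multivariate_normal, norm) are in scope; the correlation value 0.7 is just for illustration.

import numpy as np

# Hypothetical call: 1000 draws from a bivariate Gaussian copula with
# off-diagonal correlation 0.7. Each column of U is marginally Uniform(0,1).
Rho = np.array([[1.0, 0.7],
                [0.7, 1.0]])
U = _gaussian(1000, Rho)
print(U.shape)             # (1000, 2)
print(U.min(), U.max())    # values lie in (0, 1)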
Example #4
Source File: copularnd.py    From copula-py with GNU General Public License v3.0
def _clayton(M, N, alpha):
    if(alpha<0):
        raise ValueError('Alpha must be >=0 for Clayton Copula Family')
    if(N<2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N==2):
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if(alpha<np.spacing(1)):
            u2 = p
        else:
            u2 = u1*np.power((np.power(p,(-alpha/(1.0+alpha))) - 1 + np.power(u1,alpha)),(-1.0/alpha))
        
        U = np.column_stack((u1,u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        U = np.empty((M,N))
        for ii in range(0,M):
            shape = 1.0/alpha
            loc = 0
            scale = 1
            v = gamma.rvs(shape, loc=loc, scale=scale)
            
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            if(alpha<0):
                tmp = np.maximum(0, 1.0-t)
            else:
                tmp = 1.0 + t
            
            U[ii,:] = np.power(tmp, -1.0/alpha)

    return U 
Example #5
Source File: copularnd.py    From copula-py with GNU General Public License v3.0
def _frank(M, N, alpha):
    if(N<2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N==2):        
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if abs(alpha) > math.log(sys.float_info.max):
            u2 = (u1 < 0).astype(int) + np.sign(alpha)*u1  # u1 or 1-u1
        elif abs(alpha) > math.sqrt(np.spacing(1)):
            u2 = -1*np.log((np.exp(-alpha*u1)*(1-p)/p + np.exp(-alpha))/(1 + np.exp(-alpha*u1)*(1-p)/p))/alpha
        else:
            u2 = p
        
        U = np.column_stack((u1,u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        if(alpha<=0):
            raise ValueError('For N>=3, alpha >0 in Frank Copula')
            
        U = np.empty((M,N))
        #v_vec = np.empty(M)
        for ii in range(0,M):
            p = -1.0*np.expm1(-1*alpha)
            if(p==1):
                # boundary case protection
                p = 1 - np.spacing(1)
            v = logser.rvs(p, size=1)
            #v_vec[ii] = v
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            U[ii,:] = -1.0*np.log1p( np.exp(-t)*np.expm1(-1.0*alpha))/alpha
            
        #sio.savemat('logser_v.mat', {'v':v_vec})
            
    return U 
Example #6
Source File: generator.py    From P4J with MIT License
def generate_uncertainties(N, dist='Gamma', rseed=None):
    """
    This function generates uncertainties for the white noise component
    of the synthetic light curve.
    
    Parameters
    ---------
    N: positive integer
        Length of the returned uncertainty vector
    dist: {'EMG', 'Gamma'}
        Probability density function (PDF) used to generate the 
        uncertainties
    rseed: int or None, optional
        Seed for the random number generator
        
    Returns
    -------
    s: ndarray
        Vector containing the uncertainties
    expected_s_2: float
        Expectation of the square of s computed analytically
        
    """
    np.random.seed(rseed)  
    #print(dist)
    if dist == 'EMG':  # Exponential modified Gaussian
        # the mean of an EMG rv is mu + K*sigma
        # the variance of an EMG rv is sigma**2 + (K*sigma)**2
        K = 1.824328605481941
        sigma = 0.05*0.068768312946785953
        mu = 0.05*0.87452567616276777
        # IMPORTANT NOTE
        # These parameters were obtained after fitting uncertainties
        # coming from 10,000 light curves of the VVV survey
        expected_s_2 = sigma**2 + mu**2 + 2*K*mu*sigma + 2*K**2*sigma**2 
        s = exponnorm.rvs(K, loc=mu, scale=sigma, size=N)
    elif dist == 'Gamma':
        # The mean of a gamma rv is k*sigma
        # The variance of a gamma rv is k*sigma**2
        k = 3.0
        sigma = 0.05/k  #  mean=0.05, var=0.05**2/k
        s = gamma.rvs(k, loc=0.0, scale=sigma, size=N)
        expected_s_2 = k*(1+k)*sigma**2
    else:
        raise ValueError("dist must be 'EMG' or 'Gamma'")
    return s, expected_s_2 
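As a quick sanity check (not part of the original source), the analytic second moment returned for the Gamma branch can be compared against a Monte Carlo estimate:

import numpy as np
from scipy.stats import gamma

# Empirical E[s**2] from a large Gamma(k, scale=sigma) sample should match
# the analytic value k*(1+k)*sigma**2 used above (about 0.00333 here).
k, sigma = 3.0, 0.05 / 3.0
s = gamma.rvs(k, loc=0.0, scale=sigma, size=200000, random_state=0)
print(np.mean(s**2))
print(k * (1 + k) * sigma**2)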
Example #7
Source File: conviction_system_logic.py    From cadCAD with MIT License
def driving_process(params, step, sL, s):
    
    #placeholder plumbing for random processes
    arrival_rate = 10/s['sentiment']
    rv1 = np.random.rand()
    new_participant = bool(rv1<1/arrival_rate)
    if new_participant:
        h_rv = expon.rvs(loc=0.0, scale=1000)
        new_participant_holdings = h_rv
    else:
        new_participant_holdings = 0
    
    network = s['network']
    affinities = [network.edges[e]['affinity'] for e in network.edges ]
    median_affinity = np.median(affinities)
    
    proposals = get_nodes_by_type(network, 'proposal')
    fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]
    
    funds = s['funds']
    total_funds_requested = np.sum(fund_requests)
    
    proposal_rate = 10/median_affinity * total_funds_requested/funds
    rv2 = np.random.rand()
    new_proposal = bool(rv2<1/proposal_rate)
    
    sentiment = s['sentiment']
    funds = s['funds']
    scale_factor = 1+4000*sentiment**2
    
    #this shouldn't happen but expon is throwing domain errors
    if scale_factor > 1: 
        funds_arrival = expon.rvs(loc = 0, scale = scale_factor )
    else:
        funds_arrival = 0
    
    return({'new_participant':new_participant,
            'new_participant_holdings':new_participant_holdings,
            'new_proposal':new_proposal, 
            'funds_arrival':funds_arrival})

    
#Mechanisms for updating the state based on driving processes
##--- 
Example #8
Source File: conviction_system_logic_sim.py    From cadCAD with MIT License
def driving_process(params, step, sL, s):
    
    #placeholder plumbing for random processes
    arrival_rate = 10/s['sentiment']
    rv1 = np.random.rand()
    new_participant = bool(rv1<1/arrival_rate)
    if new_participant:
        h_rv = expon.rvs(loc=0.0, scale=1000)
        new_participant_holdings = h_rv
    else:
        new_participant_holdings = 0
    
    network = s['network']
    affinities = [network.edges[e]['affinity'] for e in network.edges ]
    median_affinity = np.median(affinities)
    
    proposals = get_nodes_by_type(network, 'proposal')
    fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]
    
    funds = s['funds']
    total_funds_requested = np.sum(fund_requests)
    
    proposal_rate = 10/median_affinity * total_funds_requested/funds
    rv2 = np.random.rand()
    new_proposal = bool(rv2<1/proposal_rate)
    
    sentiment = s['sentiment']
    funds = s['funds']
    scale_factor = 1+4000*sentiment**2
    
    #this shouldn't happen but expon is throwing domain errors
    if scale_factor > 1: 
        funds_arrival = expon.rvs(loc = 0, scale = scale_factor )
    else:
        funds_arrival = 0
    
    return({'new_participant':new_participant,
            'new_participant_holdings':new_participant_holdings,
            'new_proposal':new_proposal, 
            'funds_arrival':funds_arrival})

    
#Mechanisms for updating the state based on driving processes
##--- 
Example #9
Source File: conviction_helpers.py    From cadCAD with MIT License
def initialize_network(n,m, funds_func=total_funds_given_total_supply, trigger_func =trigger_threshold ):
    network = nx.DiGraph()
    for i in range(n):
        network.add_node(i)
        network.nodes[i]['type']="participant"
        
        h_rv = expon.rvs(loc=0.0, scale=1000)
        network.nodes[i]['holdings'] = h_rv
        
        s_rv = np.random.rand() 
        network.nodes[i]['sentiment'] = s_rv
    
    participants = get_nodes_by_type(network, 'participant')
    initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
    
    initial_funds = funds_func(initial_supply)    
    
    #generate initial proposals
    for ind in range(m):
        j = n+ind
        network.add_node(j)
        network.nodes[j]['type']="proposal"
        network.nodes[j]['conviction']=0
        network.nodes[j]['status']='candidate'
        network.nodes[j]['age']=0
        
        r_rv = gamma.rvs(3,loc=0.001, scale=10000)
        network.nodes[j]['funds_requested'] = r_rv  # use .nodes (G.node was removed in networkx 2.4)
        
        network.nodes[j]['trigger'] = trigger_func(r_rv, initial_funds, initial_supply)  # honor the trigger_func argument (defaults to trigger_threshold)
        
        for i in range(n):
            network.add_edge(i, j)
            
            rv = np.random.rand()
            a_rv = 1-4*(1-rv)*rv #polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv
            network.edges[(i,j)]['tokens'] = 0
            network.edges[(i, j)]['conviction'] = 0
            
    # tally the requested funds once, after all proposals have been added
    proposals = get_nodes_by_type(network, 'proposal')
    total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
        
    return network, initial_funds, initial_supply, total_requested 
Example #10
Source File: copularnd.py    From copula-py with GNU General Public License v3.0
def _gumbel(M, N, alpha):
    if alpha < 1:
        raise ValueError('Alpha must be >=1 for Gumbel Copula Family!')
    if(N<2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N==2):
        if alpha < (1 + math.sqrt(np.spacing(1))):
            u1 = uniform.rvs(size=M)
            u2 = uniform.rvs(size=M)
        else:
            # use the Marshall-Olkin method
            # Generate gamma as Stable(1/alpha,1), c.f. Devroye, Thm. IV.6.7
            u = (uniform.rvs(size=M) - .5) * math.pi # Generate M uniformly distributed RV's between -pi/2 and pi/2
            u2 = u + math.pi/2
            e  = -1*np.log(uniform.rvs(size=M))
            t = np.cos(u - u2/alpha)/e
            gamma = np.power(np.sin(u2/alpha)/t, (1.0/alpha)) * t/np.cos(u)  # note: this local name shadows scipy.stats.gamma
            
            # Frees&Valdez, eqn 3.5
            u1 = np.exp(-1* (np.power(-1*np.log(uniform.rvs(size=M)), 1.0/alpha) / gamma) )
            u2 = np.exp(-1* (np.power(-1*np.log(uniform.rvs(size=M)), 1.0/alpha) / gamma) )
            
        U = np.column_stack((u1,u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        U = np.empty((M,N))
        #v_vec = np.empty(M)
        for ii in range(0,M):
            a  = 1.0/alpha
            b  = 1
            g  = np.power(np.cos(math.pi/(2.0*alpha)), alpha)
            d  = 0
            pm = 1
            v = rstable1(1,a,b,g,d,pm)
            #v_vec[ii] = v
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            
            U[ii,:] = np.exp(-1*np.power(t, 1.0/alpha))
        
        #sio.savemat('gamma_v.mat', {'v':v_vec})
        
    return U 
Example #11
Source File: gtr_site_specific.py    From treetime with MIT License
def random(cls, L=1, avg_mu=1.0, alphabet='nuc', pi_dirichlet_alpha=1,
               W_dirichlet_alpha=3.0, mu_gamma_alpha=3.0):
        """
        Creates a random GTR model

        Parameters
        ----------
        L : int, optional
            number of sites for which to generate a model
        avg_mu : float
           Average substitution rate
        alphabet : str
           Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap')
        pi_dirichlet_alpha : float, optional
            parameter of dirichlet distribution
        W_dirichlet_alpha : float, optional
            parameter of dirichlet distribution
        mu_gamma_alpha : float, optional
            shape parameter of the gamma distribution used to sample site-specific rates

        Returns
        -------
        GTR_site_specific
            model with randomly sampled frequencies, symmetric substitution matrix W, and site-specific rates
        """

        from scipy.stats import gamma
        alphabet=alphabets[alphabet]
        gtr = cls(alphabet=alphabet, seq_len=L)
        n = gtr.alphabet.shape[0]

        # Dirichlet distribution == l_1 normalized vector of samples of the Gamma distribution
        if pi_dirichlet_alpha:
            pi = 1.0*gamma.rvs(pi_dirichlet_alpha, size=(n,L))
        else:
            pi = np.ones((n,L))

        pi /= pi.sum(axis=0)
        if W_dirichlet_alpha:
            tmp = 1.0*gamma.rvs(W_dirichlet_alpha, size=(n,n))
        else:
            tmp = np.ones((n,n))
        tmp = np.tril(tmp,k=-1)
        W = tmp + tmp.T

        if mu_gamma_alpha:
            mu = gamma.rvs(mu_gamma_alpha, size=(L,))
        else:
            mu = np.ones(L)

        gtr.assign_rates(mu=mu, pi=pi, W=W)
        gtr.mu *= avg_mu/np.mean(gtr.average_rate())

        return gtr
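The comment in the body relies on the standard fact that column-normalizing independent Gamma(alpha) draws yields Dirichlet(alpha, ..., alpha) samples; a small illustrative check (my own sketch, not part of treetime):

import numpy as np
from scipy.stats import gamma

# Each column of l1-normalized Gamma(alpha) samples is a Dirichlet draw.
alpha, n, L = 1.0, 4, 3
pi = gamma.rvs(alpha, size=(n, L), random_state=1)
pi /= pi.sum(axis=0)
print(pi.sum(axis=0))   # every column sums to 1
# For alpha = 1 each column is uniform on the probability simplex.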