Python tensorflow.python.ops.random_ops.random_gamma() Examples
The following are 29 code examples of tensorflow.python.ops.random_ops.random_gamma(), collected from open-source projects. Each example notes its source file, the project it comes from, and the project's license. You may also want to check out the other functions and classes of the module tensorflow.python.ops.random_ops.
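For orientation before the examples, here is a minimal usage sketch of the op's interface. It assumes TensorFlow 2.x, where the same kernel is exposed publicly as tf.random.gamma; the internal random_ops.random_gamma used below takes the same shape, alpha, beta, dtype, and seed arguments.

# Minimal usage sketch (assumes TensorFlow 2.x, where the op is public
# as tf.random.gamma; not taken from any of the projects below).
import tensorflow as tf

# Draw 5 samples for each of three concentration (alpha) values.
# beta is a rate parameter; the result has shape [5, 3].
samples = tf.random.gamma(shape=[5], alpha=[0.5, 1.0, 2.0], beta=1.0, seed=42)
print(samples.shape)  # (5, 3)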
Example #1
Source File: dirichlet_multinomial.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.concentration,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
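The trick above works because a Dirichlet(concentration) draw is a vector of independent Gamma(concentration_i, 1) draws normalized to sum to one, and because multinomial accepts unnormalized logits, so passing log(gamma) directly avoids the normalization. A rough NumPy analogue (the helper name is hypothetical, not from the source):

import numpy as np

def dirichlet_multinomial_sample(total_count, concentration, rng=np.random):
    # Normalized independent Gamma draws form a Dirichlet draw.
    gammas = rng.gamma(shape=concentration)      # one Gamma(alpha_i, 1) per class
    probs = gammas / gammas.sum()                # p ~ Dirichlet(concentration)
    return rng.multinomial(total_count, probs)   # counts over the k classes

print(dirichlet_multinomial_sample(10, np.array([1.0, 2.0, 3.0])))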
Example #2
Source File: beta.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  expanded_concentration1 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration1
  expanded_concentration0 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration0
  gamma1_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration1,
      dtype=self.dtype,
      seed=seed)
  gamma2_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration0,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
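The construction relies on the identity that if G1 ~ Gamma(a, 1) and G2 ~ Gamma(b, 1) are independent, then G1 / (G1 + G2) ~ Beta(a, b). A quick NumPy check of the implied mean a / (a + b), offered as an illustration rather than part of the source:

import numpy as np

a, b, n = 2.0, 5.0, 100_000
g1 = np.random.gamma(a, size=n)
g2 = np.random.gamma(b, size=n)
beta_samples = g1 / (g1 + g2)
print(beta_samples.mean(), a / (a + b))  # both close to 0.2857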
Example #3
Source File: student_t.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
  return samples * self.scale + self.loc  # Abs(scale) not wanted.
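Note that the gamma call stands in for a chi-square draw: Chi2(df) is exactly Gamma(alpha=df/2, rate=1/2). A NumPy sketch of the same construction (illustrative only; NumPy's gamma takes a scale, so rate=1/2 becomes scale=2):

import numpy as np

df, n = 5.0, 100_000
z = np.random.normal(size=n)
chi2 = np.random.gamma(df / 2.0, scale=2.0, size=n)  # Gamma(df/2, rate=1/2)
t_samples = z / np.sqrt(chi2 / df)
print(t_samples.var(), df / (df - 2.0))  # variance of StudentT(df), ~1.667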
Example #4
Source File: student_t.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample / math_ops.sqrt(gamma_sample / df)
  return samples * self.sigma + self.mu  # Abs(sigma) not wanted.
Example #5
Source File: negative_binomial.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  # Here we use the fact that if:
  #   lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
  # then X ~ Poisson(lam) is Negative Binomially distributed.
  rate = random_ops.random_gamma(
      shape=[n],
      alpha=self.total_count,
      beta=math_ops.exp(-self.logits),
      dtype=self.dtype,
      seed=seed)
  return random_ops.random_poisson(
      rate,
      shape=[],
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "negative_binom"))
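Since logits = log(probs / (1 - probs)), the rate math_ops.exp(-self.logits) equals (1 - probs) / probs, matching the comment. A NumPy sketch of the same gamma-Poisson mixture (illustrative, not from the source; NumPy's scale is the reciprocal of the rate):

import numpy as np

r, p, n = 4.0, 0.3, 100_000
lam = np.random.gamma(r, scale=p / (1.0 - p), size=n)  # rate = (1 - p) / p
x = np.random.poisson(lam)
print(x.mean(), r * p / (1.0 - p))  # negative binomial mean, ~1.714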
Example #6
Source File: dirichlet_multinomial.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.concentration,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)
  return math_ops.cast(x, self.dtype)
Example #7
Source File: beta.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
  expanded_concentration1 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration1
  expanded_concentration0 = array_ops.ones_like(
      self.total_concentration, dtype=self.dtype) * self.concentration0
  gamma1_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration1,
      dtype=self.dtype,
      seed=seed)
  gamma2_sample = random_ops.random_gamma(
      shape=[n],
      alpha=expanded_concentration0,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example #8
Source File: student_t.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
  return samples * self.scale + self.loc  # Abs(scale) not wanted.
Example #9
Source File: student_t.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  # The sampling method comes from the fact that if:
  #   X ~ Normal(0, 1)
  #   Z ~ Chi2(df)
  #   Y = X / sqrt(Z / df)
  # then:
  #   Y ~ StudentT(df).
  shape = array_ops.concat([[n], self.batch_shape()], 0)
  normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
  df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n],
      0.5 * df,
      beta=0.5,
      dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample / math_ops.sqrt(gamma_sample / df)
  return samples * self.sigma + self.mu  # Abs(sigma) not wanted.
Example #10
Source File: dirichlet.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
  gamma_sample = random_ops.random_gamma(
      [n,], self.alpha, dtype=self.dtype, seed=seed)
  return gamma_sample / math_ops.reduce_sum(
      gamma_sample, reduction_indices=[-1], keep_dims=True)
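This is the standard normalized-gamma construction: independent Gamma(alpha_i, 1) draws divided by their sum are a Dirichlet(alpha) draw. A NumPy check of the resulting mean alpha / sum(alpha), illustrative only:

import numpy as np

alpha = np.array([1.0, 2.0, 3.0])
gammas = np.random.gamma(alpha, size=(100_000, 3))
dirichlet = gammas / gammas.sum(axis=-1, keepdims=True)
print(dirichlet.mean(axis=0), alpha / alpha.sum())  # both ~ [0.167, 0.333, 0.5]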
Example #11
Source File: inverse_gamma.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                      dtype=self.dtype, seed=seed)
Example #12
Source File: gamma.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
  return random_ops.random_gamma(
      shape=[n],
      alpha=self.concentration,
      beta=self.rate,
      dtype=self.dtype,
      seed=seed)
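Here random_gamma is the sampler itself, with concentration as the shape parameter and beta as the rate, so the mean is concentration / rate. A NumPy check, illustrative only:

import numpy as np

conc, rate, n = 3.0, 2.0, 100_000
samples = np.random.gamma(conc, scale=1.0 / rate, size=n)
print(samples.mean(), conc / rate)  # both near 1.5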
Example #13
Source File: dirichlet.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _sample_n(self, n, seed=None):
  gamma_sample = random_ops.random_gamma(
      shape=[n], alpha=self.concentration, dtype=self.dtype, seed=seed)
  return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keep_dims=True)
Example #14
Source File: gamma.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                 dtype=self.dtype, seed=seed)
Example #15
Source File: inverse_gamma.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                      dtype=self.dtype, seed=seed)
Example #16
Source File: dirichlet_multinomial.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.alpha,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #17
Source File: beta.py From keras-lambda with MIT License
def _sample_n(self, n, seed=None):
  a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
  b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
  gamma1_sample = random_ops.random_gamma(
      [n,], a, dtype=self.dtype, seed=seed)
  gamma2_sample = random_ops.random_gamma(
      [n,], b, dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example #18
Source File: student_t.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
  # The sampling method comes from the well known fact that if X ~ Normal(0,
  # 1), and Z ~ Chi2(df), then X / sqrt(Z / df) ~ StudentT(df).
  shape = array_ops.concat(0, ([n], self.batch_shape()))
  normal_sample = random_ops.random_normal(
      shape, dtype=self.dtype, seed=seed)
  half = constant_op.constant(0.5, self.dtype)
  df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  gamma_sample = random_ops.random_gamma(
      [n,], half * df, beta=half, dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, salt="student_t"))
  samples = normal_sample / math_ops.sqrt(gamma_sample / df)
  return samples * self.sigma + self.mu
Example #19
Source File: beta.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
  a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
  b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
  gamma1_sample = random_ops.random_gamma(
      [n,], a, dtype=self.dtype, seed=seed)
  gamma2_sample = random_ops.random_gamma(
      [n,], b, dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example #20
Source File: gamma.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  return random_ops.random_gamma(
      shape=[n],
      alpha=self.concentration,
      beta=self.rate,
      dtype=self.dtype,
      seed=seed)
Example #21
Source File: gamma.py From deep_image_model with Apache License 2.0
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                 dtype=self.dtype, seed=seed)
Example #22
Source File: beta.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  a = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.a
  b = array_ops.ones_like(self.a_b_sum, dtype=self.dtype) * self.b
  gamma1_sample = random_ops.random_gamma(
      [n,], a, dtype=self.dtype, seed=seed)
  gamma2_sample = random_ops.random_gamma(
      [n,], b, dtype=self.dtype,
      seed=distribution_util.gen_new_seed(seed, "beta"))
  beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
  return beta_sample
Example #23
Source File: dirichlet_multinomial.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  unnormalized_logits = array_ops.reshape(
      math_ops.log(random_ops.random_gamma(
          shape=[n],
          alpha=self.alpha,
          dtype=self.dtype,
          seed=seed)),
      shape=[-1, k])
  draws = random_ops.multinomial(
      logits=unnormalized_logits,
      num_samples=n_draws,
      seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #24
Source File: inverse_gamma.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                      dtype=self.dtype, seed=seed)
Example #25
Source File: gamma.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed=None):
  """See the documentation for tf.random_gamma for more details."""
  return random_ops.random_gamma([n], self.alpha, beta=self.beta,
                                 dtype=self.dtype, seed=seed)
Example #26
Source File: dirichlet.py From lambda-packs with MIT License
def _sample_n(self, n, seed=None):
  gamma_sample = random_ops.random_gamma(
      shape=[n], alpha=self.concentration, dtype=self.dtype, seed=seed)
  return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keep_dims=True)
Example #27
Source File: wishart.py From auto-alt-text-lambda-api with MIT License
def _sample_n(self, n, seed):
  batch_shape = self.batch_shape()
  event_shape = self.event_shape()
  batch_ndims = array_ops.shape(batch_shape)[0]

  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
  shape = array_ops.concat(((n,), batch_shape, event_shape), 0)

  # Complexity: O(nbk^2)
  x = random_ops.random_normal(shape=shape,
                               mean=0.,
                               stddev=1.,
                               dtype=self.dtype,
                               seed=seed)

  # Complexity: O(nbk)
  # This parametrization is equivalent to Chi2, i.e.,
  # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
  g = random_ops.random_gamma(shape=(n,),
                              alpha=self._multi_gamma_sequence(
                                  0.5 * self.df, self.dimension),
                              beta=0.5,
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "wishart"))

  # Complexity: O(nbk^2)
  x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

  # Complexity: O(nbk)
  x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

  # Make batch-op ready.
  # Complexity: O(nbk^2)
  perm = array_ops.concat((math_ops.range(1, ndims), (0,)), 0)
  x = array_ops.transpose(x, perm)
  shape = array_ops.concat((batch_shape, (event_shape[0], -1)), 0)
  x = array_ops.reshape(x, shape)

  # Complexity: O(nbM) where M is the complexity of the operator solving a
  # vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
  # this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
  # O(k^3) so this step has complexity O(nbk^3).
  x = self.scale_operator_pd.sqrt_matmul(x)

  # Undo make batch-op ready.
  # Complexity: O(nbk^2)
  shape = array_ops.concat((batch_shape, event_shape, (n,)), 0)
  x = array_ops.reshape(x, shape)
  perm = array_ops.concat(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
  x = array_ops.transpose(x, perm)

  if not self.cholesky_input_output_matrices:
    # Complexity: O(nbk^3)
    x = math_ops.matmul(x, x, adjoint_b=True)

  return x
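This is the Bartlett decomposition: a lower-triangular matrix with standard normals below the diagonal and sqrt(Chi2(df - i)) on the diagonal, premultiplied by a square root of the scale matrix, yields a Wishart draw. A compact NumPy sketch under those assumptions (the helper name is hypothetical, and the chi-square draws again come from gamma with scale=2):

import numpy as np

def wishart_bartlett(df, scale, rng=np.random):
    k = scale.shape[0]
    chol = np.linalg.cholesky(scale)              # square root of the scale matrix
    a = np.tril(rng.normal(size=(k, k)), k=-1)    # N(0, 1) strictly below diagonal
    # Diagonal: sqrt of Chi2(df - i); Chi2(nu) == Gamma(nu/2, rate=1/2).
    a[np.diag_indices(k)] = np.sqrt(
        rng.gamma((df - np.arange(k)) / 2.0, scale=2.0))
    factor = chol @ a
    return factor @ factor.T

mean_draw = np.mean([wishart_bartlett(5.0, np.eye(2)) for _ in range(10_000)],
                    axis=0)
print(mean_draw)  # approaches df * scale = 5 * I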
Example #28
Source File: wishart.py From lambda-packs with MIT License
def _sample_n(self, n, seed):
  batch_shape = self.batch_shape_tensor()
  event_shape = self.event_shape_tensor()
  batch_ndims = array_ops.shape(batch_shape)[0]

  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
  shape = array_ops.concat([[n], batch_shape, event_shape], 0)

  # Complexity: O(nbk**2)
  x = random_ops.random_normal(shape=shape,
                               mean=0.,
                               stddev=1.,
                               dtype=self.dtype,
                               seed=seed)

  # Complexity: O(nbk)
  # This parametrization is equivalent to Chi2, i.e.,
  # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
  g = random_ops.random_gamma(shape=[n],
                              alpha=self._multi_gamma_sequence(
                                  0.5 * self.df, self.dimension),
                              beta=0.5,
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "wishart"))

  # Complexity: O(nbk**2)
  x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

  # Complexity: O(nbk)
  x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

  # Make batch-op ready.
  # Complexity: O(nbk**2)
  perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
  x = array_ops.transpose(x, perm)
  shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
  x = array_ops.reshape(x, shape)

  # Complexity: O(nbM) where M is the complexity of the operator solving a
  # vector system. E.g., for OperatorPDDiag, each matmul is O(k**2), so
  # this complexity is O(nbk**2). For OperatorPDCholesky, each matmul is
  # O(k^3) so this step has complexity O(nbk^3).
  x = self.scale_operator_pd.sqrt_matmul(x)

  # Undo make batch-op ready.
  # Complexity: O(nbk**2)
  shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
  x = array_ops.reshape(x, shape)
  perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
  x = array_ops.transpose(x, perm)

  if not self.cholesky_input_output_matrices:
    # Complexity: O(nbk^3)
    x = math_ops.matmul(x, x, adjoint_b=True)

  return x
Example #29
Source File: wishart.py From keras-lambda with MIT License
def _sample_n(self, n, seed):
  batch_shape = self.batch_shape()
  event_shape = self.event_shape()
  batch_ndims = array_ops.shape(batch_shape)[0]

  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
  shape = array_ops.concat(((n,), batch_shape, event_shape), 0)

  # Complexity: O(nbk^2)
  x = random_ops.random_normal(shape=shape,
                               mean=0.,
                               stddev=1.,
                               dtype=self.dtype,
                               seed=seed)

  # Complexity: O(nbk)
  # This parametrization is equivalent to Chi2, i.e.,
  # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
  g = random_ops.random_gamma(shape=(n,),
                              alpha=self._multi_gamma_sequence(
                                  0.5 * self.df, self.dimension),
                              beta=0.5,
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "wishart"))

  # Complexity: O(nbk^2)
  x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

  # Complexity: O(nbk)
  x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

  # Make batch-op ready.
  # Complexity: O(nbk^2)
  perm = array_ops.concat((math_ops.range(1, ndims), (0,)), 0)
  x = array_ops.transpose(x, perm)
  shape = array_ops.concat((batch_shape, (event_shape[0], -1)), 0)
  x = array_ops.reshape(x, shape)

  # Complexity: O(nbM) where M is the complexity of the operator solving a
  # vector system. E.g., for OperatorPDDiag, each matmul is O(k^2), so
  # this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
  # O(k^3) so this step has complexity O(nbk^3).
  x = self.scale_operator_pd.sqrt_matmul(x)

  # Undo make batch-op ready.
  # Complexity: O(nbk^2)
  shape = array_ops.concat((batch_shape, event_shape, (n,)), 0)
  x = array_ops.reshape(x, shape)
  perm = array_ops.concat(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
  x = array_ops.transpose(x, perm)

  if not self.cholesky_input_output_matrices:
    # Complexity: O(nbk^3)
    x = math_ops.matmul(x, x, adjoint_b=True)

  return x