Python torch.distributions.Bernoulli() Examples
The following are 27 code examples of torch.distributions.Bernoulli(), each taken from an open-source project. The original project and source file are named above each example. You may also want to check out all available functions/classes of the module torch.distributions.
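Before diving into the examples, here is a minimal sketch of the torch.distributions.Bernoulli API that all of them build on. A Bernoulli distribution can be parameterized either by probs (probabilities in [0, 1]) or by logits (log-odds); sample() draws 0/1 values and log_prob() scores them. The tensors below are illustrative:

import torch
from torch.distributions import Bernoulli

# Parameterize by probability or, equivalently, by log-odds.
m_probs = Bernoulli(probs=torch.tensor([0.3]))
m_logits = Bernoulli(logits=torch.tensor([-0.8473]))  # sigmoid(-0.8473) ~= 0.3

sample = m_probs.sample()         # a 0./1. tensor
batch = m_probs.sample((5,))      # five independent draws
log_p = m_probs.log_prob(sample)  # log-likelihood of the draw

print(sample, batch, log_p)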
Example #1
Source File: model.py From dgl with Apache License 2.0
def forward(self, g, action=None):
    graph_embed = self.graph_op['embed'](g)
    logit = self.add_node(graph_embed)
    prob = torch.sigmoid(logit)

    if not self.training:
        action = Bernoulli(prob).sample().item()
    stop = bool(action == self.stop)

    if not stop:
        g.add_nodes(1)
        self._initialize_node_repr(g, action, graph_embed)

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(logit, action)
        self.log_prob.append(sample_log_prob)

    return stop
Example #2
Source File: model.py From dgl with Apache License 2.0
def forward(self, g, action=None):
    graph_embed = self.graph_op['embed'](g)
    src_embed = g.nodes[g.number_of_nodes() - 1].data['hv']
    logit = self.add_edge(torch.cat([graph_embed, src_embed], dim=1))
    prob = torch.sigmoid(logit)

    if not self.training:
        action = Bernoulli(prob).sample().item()
    to_add_edge = bool(action == 0)

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(logit, action)
        self.log_prob.append(sample_log_prob)

    return to_add_edge
Example #3
Source File: dropblock.py From MetaOptNet with Apache License 2.0
def forward(self, x, gamma):
    # shape: (bsize, channels, height, width)
    if self.training:
        batch_size, channels, height, width = x.shape
        bernoulli = Bernoulli(gamma)
        mask = bernoulli.sample((batch_size, channels,
                                 height - (self.block_size - 1),
                                 width - (self.block_size - 1))).cuda()
        block_mask = self._compute_block_mask(mask)
        countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
        count_ones = block_mask.sum()
        return block_mask * x * (countM / count_ones)
    else:
        return x
Example #4
Source File: dropblock.py From FEAT with MIT License
def forward(self, x, gamma):
    # shape: (bsize, channels, height, width)
    if self.training:
        batch_size, channels, height, width = x.shape
        bernoulli = Bernoulli(gamma)
        mask = bernoulli.sample((batch_size, channels,
                                 height - (self.block_size - 1),
                                 width - (self.block_size - 1)))
        if torch.cuda.is_available():
            mask = mask.cuda()
        block_mask = self._compute_block_mask(mask)
        countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
        count_ones = block_mask.sum()
        return block_mask * x * (countM / count_ones)
    else:
        return x
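In both DropBlock variants above, gamma is the per-position probability of dropping a block seed; sampling the seed mask on a spatially reduced grid (height and width shrunk by block_size - 1) keeps every expanded block fully inside the feature map, and the final countM / count_ones factor rescales activations so the expected magnitude is preserved. The DropBlock paper (Ghiasi et al., 2018) suggests deriving gamma from a target drop rate; a hedged sketch, where dropblock_gamma, drop_prob and feat_size are illustrative names rather than part of the repositories above:

import torch
from torch.distributions import Bernoulli

def dropblock_gamma(drop_prob, block_size, feat_size):
    # Approximation from the DropBlock paper: scale the per-seed drop
    # rate so that, after block expansion, roughly drop_prob of all
    # activations end up zeroed.
    return (drop_prob / block_size ** 2) * feat_size ** 2 / (feat_size - block_size + 1) ** 2

gamma = dropblock_gamma(drop_prob=0.1, block_size=3, feat_size=20)
seed_mask = Bernoulli(gamma).sample((2, 4, 18, 18))  # 18 = 20 - (3 - 1)
print(gamma, seed_mask.mean())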
Example #5
Source File: ipd_DiCE_om.py From LOLA_DiCE with MIT License
def act(batch_states, theta, values):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions, values[batch_states]
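The act helpers from LOLA_DiCE all follow the same pattern: a sigmoid turns per-state parameters theta into action probabilities, a Bernoulli turns those into discrete actions, and log_prob supplies the score-function term for the policy gradient. A minimal, self-contained sketch of that pattern (theta and reward here are illustrative stand-ins, not objects from the repository):

import torch
from torch.distributions import Bernoulli

theta = torch.zeros(5, requires_grad=True)  # one logit per state
probs = torch.sigmoid(theta)

m = Bernoulli(1 - probs)             # as above: action 1 is taken with prob 1 - probs
actions = m.sample()
log_probs = m.log_prob(actions)

reward = torch.randn(5)              # placeholder returns
loss = -(log_probs * reward).sum()   # REINFORCE surrogate loss
loss.backward()                      # gradients flow into theta
print(theta.grad)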
Example #6
Source File: __init__.py From occupancy_networks with MIT License
def decode(self, p, z, c, **kwargs):
    ''' Returns occupancy probabilities for the sampled points.

    Args:
        p (tensor): points
        z (tensor): latent code z
        c (tensor): latent conditioned code c
    '''
    logits = self.decoder(p, z, c, **kwargs)
    p_r = dist.Bernoulli(logits=logits)
    return p_r
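Returning dist.Bernoulli(logits=logits) rather than applying a sigmoid and passing probs keeps downstream log-likelihoods numerically stable, since log_prob is computed from the logits directly. A small sketch of how such a returned distribution is typically consumed; the shapes and the occ tensor are illustrative, not taken from the repository:

import torch
import torch.distributions as dist

logits = torch.randn(8, 2048)  # decoder output: one logit per query point
p_r = dist.Bernoulli(logits=logits)

occ = torch.randint(0, 2, (8, 2048)).float()  # ground-truth occupancies in {0, 1}
nll = -p_r.log_prob(occ).sum(dim=-1)          # per-example negative log-likelihood
probs = p_r.probs                             # sigmoid(logits), if probabilities are needed
print(nll.shape, probs.shape)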
Example #7
Source File: exponential_distributions.py From pixyz with MIT License
def set_dist(self, x_dict={}, batch_n=None, sampling=False, **kwargs):
    """Set :attr:`dist` as PyTorch distributions given parameters.

    This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.

    Parameters
    ----------
    x_dict : :obj:`dict`, defaults to {}.
        Parameters of this distribution.
    batch_n : :obj:`int`, defaults to None.
        Set batch size of parameters.
    sampling : :obj:`bool`, defaults to False.
        If it is False, the distribution will not be relaxed to compute log_prob.
    **kwargs
        Arbitrary keyword arguments.
    """
    params = self.get_params(x_dict, **kwargs)
    if set(self.params_keys) != set(params.keys()):
        raise ValueError("{} class requires following parameters: {}\n"
                         "but got {}".format(type(self), set(self.params_keys), set(params.keys())))

    if sampling:
        self._dist = self.distribution_torch_class(**params)
    else:
        hard_params_keys = ["probs"]
        self._dist = BernoulliTorch(**get_dict_values(params, hard_params_keys, return_dict=True))

    # expand batch_n
    if batch_n:
        batch_shape = self._dist.batch_shape
        if batch_shape[0] == 1:
            self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
        elif batch_shape[0] == batch_n:
            return
        else:
            raise ValueError()
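The sampling branch above instantiates self.distribution_torch_class (for pixyz's relaxed Bernoulli, presumably torch.distributions.RelaxedBernoulli), while the non-sampling branch falls back to a hard BernoulliTorch so that log_prob is exact. The distinction matters because a relaxed Bernoulli produces continuous samples in (0, 1) that admit reparameterized gradients. A hedged sketch of the two behaviors, with illustrative tensors:

import torch
from torch.distributions import Bernoulli, RelaxedBernoulli

probs = torch.tensor([0.3, 0.7], requires_grad=True)

hard = Bernoulli(probs=probs).sample()    # exact {0, 1} draws, no pathwise gradient
soft = RelaxedBernoulli(temperature=torch.tensor(0.5),
                        probs=probs).rsample()  # continuous draws in (0, 1), differentiable
print(hard, soft, soft.requires_grad)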
Example #8
Source File: ResNet12.py From hyperbolic-image-embeddings with MIT License
def forward(self, x, gamma):
    # shape: (bsize, channels, height, width)
    if self.training:
        batch_size, channels, height, width = x.shape
        bernoulli = Bernoulli(gamma)
        mask = bernoulli.sample((
            batch_size,
            channels,
            height - (self.block_size - 1),
            width - (self.block_size - 1),
        ))
        if torch.cuda.is_available():
            mask = mask.cuda()
        block_mask = self._compute_block_mask(mask)
        countM = (
            block_mask.size()[0]
            * block_mask.size()[1]
            * block_mask.size()[2]
            * block_mask.size()[3]
        )
        count_ones = block_mask.sum()
        return block_mask * x * (countM / count_ones)
    else:
        return x
Example #9
Source File: baseline_model.py From TextFlow with MIT License
def generate(self, T, B):
    if not self.T_condition:
        raise NotImplementedError("Only the version conditioned on T has been implemented.")

    hidden = self.init_hidden(B)
    lengths = torch.tensor([T] * B)
    device = hidden[0].device
    cond_inp = make_pos_cond(T, B, lengths, self.max_T).to(device)

    if self.indep_bernoulli:
        generation = torch.zeros(T, B, self.vocab_size, dtype=torch.long, device=device)
    else:
        generation = torch.zeros(T, B, dtype=torch.long, device=device)

    last_rnn_outp = hidden[0][-1]
    for t in range(T):
        scores = self.output_embedding(last_rnn_outp)  # [B, V]
        if self.indep_bernoulli:
            word_dist = Bernoulli(logits=scores)
        else:
            word_dist = Categorical(logits=scores)
        selected_index = word_dist.sample()
        generation[t] = selected_index

        if t < T - 1:
            if self.indep_bernoulli:
                inp_embeddings = torch.matmul(generation[t].float(), self.input_embedding.weight)
            else:
                inp_embeddings = self.input_embedding(generation[t])  # [B, E]
            inp_embeddings = torch.cat((inp_embeddings, cond_inp[t + 1]), -1)
            last_rnn_outp, hidden = self.rnn(inp_embeddings[None, :, :], hidden)
            last_rnn_outp = last_rnn_outp[0]

    return generation
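The switch between the two distributions above is worth pausing on: with Categorical(logits=scores), each step emits exactly one token, whereas with Bernoulli(logits=scores) each of the V output dimensions is an independent coin flip, so a step can emit any subset of the vocabulary as a multi-hot vector. A compact sketch of the difference, with illustrative shapes:

import torch
from torch.distributions import Bernoulli, Categorical

scores = torch.randn(4, 10)  # [B, V] unnormalized scores

token_ids = Categorical(logits=scores).sample()  # shape [4]: one token id per example
multi_hot = Bernoulli(logits=scores).sample()    # shape [4, 10]: independent 0/1 per dimension
print(token_ids.shape, multi_hot.shape)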
Example #10
Source File: __init__.py From occupancy_flow with MIT License
def decode(self, p, z=None, c=None, **kwargs):
    ''' Returns occupancy values for the points p at time step t.

    Args:
        p (tensor): points of dimension 4
        z (tensor): latent code z
        c (tensor): latent conditioned code c
            (For OFlow, this is c_spatial, whereas for ONet 4D, this is c_temporal)
    '''
    logits = self.decoder(p, z, c, **kwargs)
    p_r = dist.Bernoulli(logits=logits)
    return p_r
Example #11
Source File: __init__.py From occupancy_flow with MIT License
def decode(self, p, z=None, c=None, **kwargs):
    ''' Returns occupancy values for the points p at time step 0.

    Args:
        p (tensor): points
        z (tensor): latent code z
        c (tensor): latent conditioned code c
            (For OFlow, this is c_spatial)
    '''
    logits = self.decoder(p, z, c, **kwargs)
    p_r = dist.Bernoulli(logits=logits)
    return p_r
Example #12
Source File: epsilon_greedy.py From cherry with Apache License 2.0
def forward(self, x):
    bests = x.max(dim=1, keepdim=True)[1]
    sampled = Categorical(probs=th.ones_like(x)).sample()
    probs = th.ones(x.size(0), 1) - self.epsilon
    b = Bernoulli(probs=probs).sample().long()
    ret = bests * b + (1 - b) * sampled
    return ret
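The idea here is epsilon-greedy action selection: a Bernoulli coin with success probability 1 - epsilon decides per example whether to take the greedy action (bests) or a uniformly random one (a Categorical over equal weights). Note that as written sampled has shape [B] while bests and b have shape [B, 1], so the final arithmetic depends on how the caller shapes x. A shape-explicit, hedged sketch of the same policy (epsilon_greedy is an illustrative name, not cherry's API):

import torch
from torch.distributions import Bernoulli, Categorical

def epsilon_greedy(q_values, epsilon):
    # q_values: [B, A] action values; returns [B, 1] action indices
    bests = q_values.max(dim=1, keepdim=True)[1]                    # greedy actions
    random = Categorical(probs=torch.ones_like(q_values)).sample()  # uniform actions, [B]
    random = random.unsqueeze(1)                                    # -> [B, 1]
    greedy = Bernoulli(probs=torch.full((q_values.size(0), 1),
                                        1.0 - epsilon)).sample().long()
    return greedy * bests + (1 - greedy) * random

actions = epsilon_greedy(torch.randn(6, 3), epsilon=0.1)
print(actions.shape)  # torch.Size([6, 1])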
Example #13
Source File: dropout.py From deep-generative-lm with MIT License
def sample_mask(self, p, shape):
    """Samples a dropout mask from a Bernoulli distribution.

    Args:
        p(float): the dropout probability [0, 1].
        shape(torch.Size): shape of the mask to be sampled.
    """
    if self.training:
        self._mask = Bernoulli(1. - p).sample(shape)
    else:
        self._mask = (1. - p)
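Note the eval branch: instead of sampling, the mask collapses to the scalar keep probability 1 - p, i.e. this is classic (non-inverted) dropout that rescales activations at test time. The more common inverted variant scales at training time instead, so evaluation is a no-op. A hedged sketch of the contrast, with illustrative tensors:

import torch
from torch.distributions import Bernoulli

x, p = torch.randn(3, 4), 0.2

# Classic dropout (as above): sample at train time, scale by (1 - p) at eval.
train_out = Bernoulli(1. - p).sample(x.shape) * x
eval_out = (1. - p) * x

# Inverted dropout: scale the mask at train time, leave eval untouched.
train_out_inv = Bernoulli(1. - p).sample(x.shape) / (1. - p) * x
eval_out_inv = x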
Example #14
Source File: ipd_exact.py From LOLA_DiCE with MIT License
def act(batch_states, theta):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions
Example #15
Source File: ipd_exact_om.py From LOLA_DiCE with MIT License
def act(batch_states, theta):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions
Example #16
Source File: ipd_DiCE.py From LOLA_DiCE with MIT License
def act(batch_states, theta, values):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions, values[batch_states]
Example #17
Source File: dropblock.py From MetaOptNet with Apache License 2.0
def __init__(self, block_size):
    super(DropBlock, self).__init__()
    self.block_size = block_size
Example #18
Source File: __init__.py From torchsupport with MIT License
def _hard_bernoulli(self, dist):
    return dist.Bernoulli(logits=dist.logits)
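As reproduced here, the dist argument appears to shadow whatever torch.distributions alias the module imports, so dist.Bernoulli resolves as an attribute lookup on the distribution object rather than on the module. The apparent intent, building a hard Bernoulli that shares its logits with a relaxed distribution, can be written unambiguously as in this sketch (hard_bernoulli is an illustrative name, not torchsupport's API):

import torch
import torch.distributions as D

def hard_bernoulli(relaxed):
    # Build a hard Bernoulli sharing the logits of a relaxed distribution.
    return D.Bernoulli(logits=relaxed.logits)

relaxed = D.RelaxedBernoulli(temperature=torch.tensor(0.3), logits=torch.randn(4))
hard = hard_bernoulli(relaxed)
print(hard.sample())  # exact {0, 1} draws from the same logits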
Example #19
Source File: 5_dgmg.py From dgl with Apache License 2.0
def forward(self, g, action=None):
    graph_embed = self.graph_op['embed'](g)
    src_embed = g.nodes[g.number_of_nodes() - 1].data['hv']
    logit = self.add_edge(torch.cat([graph_embed, src_embed], dim=1))
    prob = torch.sigmoid(logit)

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(logit, action)
        self.log_prob.append(sample_log_prob)
    else:
        action = Bernoulli(prob).sample().item()

    to_add_edge = bool(action == 0)
    return to_add_edge

#######################################################################################
# Action 3: Choose a destination
# '''''''''''''''''''''''''''''''''
#
# When action 2 returns `True`, choose a destination for the
# latest node :math:`v`.
#
# For each possible destination :math:`u\in\{0, \cdots, v-1\}`, the
# probability of choosing it is given by
#
# .. math::
#
#    \frac{\text{exp}(\textbf{W}_{\text{dest}}\text{concat}([\textbf{h}_{u}, \textbf{h}_{v}])+\textbf{b}_{\text{dest}})}{\sum_{i=0}^{v-1}\text{exp}(\textbf{W}_{\text{dest}}\text{concat}([\textbf{h}_{i}, \textbf{h}_{v}])+\textbf{b}_{\text{dest}})}\\
#
Example #20
Source File: 5_dgmg.py From dgl with Apache License 2.0
def forward(self, g, action=None):
    graph_embed = self.graph_op['embed'](g)
    logit = self.add_node(graph_embed)
    prob = torch.sigmoid(logit)

    if not self.training:
        action = Bernoulli(prob).sample().item()
    stop = bool(action == self.stop)

    if not stop:
        g.add_nodes(1)
        self._initialize_node_repr(g, action, graph_embed)

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(logit, action)
        self.log_prob.append(sample_log_prob)

    return stop

#######################################################################################
# Action 2: Add edges
# ''''''''''''''''''''''''''
#
# Given the graph embedding vector :math:`\textbf{h}_{G}` and the node
# embedding vector :math:`\textbf{h}_{v}` for the latest node :math:`v`,
# you evaluate
#
# .. math::
#
#    \text{Sigmoid}(\textbf{W}_{\text{add edge}}\text{concat}([\textbf{h}_{G}, \textbf{h}_{v}])+b_{\text{add edge}}),\\
#
# which is then used to parametrize a Bernoulli distribution for deciding
# whether to add a new edge starting from :math:`v`.
#
Example #21
Source File: 5_dgmg.py From dgl with Apache License 2.0
def bernoulli_action_log_prob(logit, action):
    """Calculate the log p of an action with respect to a Bernoulli
    distribution. Use logit rather than prob for numerical stability."""
    if action == 0:
        return F.logsigmoid(-logit)
    else:
        return F.logsigmoid(logit)
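The two branches are exactly Bernoulli(logits=logit).log_prob(action) written out by hand: log p(1) = logsigmoid(logit) and log p(0) = logsigmoid(-logit), since 1 - sigmoid(x) = sigmoid(-x). A quick numerical check of that equivalence:

import torch
import torch.nn.functional as F
from torch.distributions import Bernoulli

logit = torch.tensor(0.7)
for action in (0., 1.):
    by_hand = F.logsigmoid(-logit) if action == 0 else F.logsigmoid(logit)
    built_in = Bernoulli(logits=logit).log_prob(torch.tensor(action))
    assert torch.allclose(by_hand, built_in)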
Example #22
Source File: 5_dgmg.py From dgl with Apache License 2.0
def forward(self, g):
    if g.number_of_edges() > 0:
        for t in range(self.num_prop_rounds):
            g.update_all(message_func=self.dgmg_msg,
                         reduce_func=self.reduce_funcs[t])
            g.ndata['hv'] = self.node_update_funcs[t](
                g.ndata['a'], g.ndata['hv'])

#######################################################################################
# Actions
# ``````````````````````````
# All actions are sampled from distributions parameterized using neural networks
# and here they are in turn.
#
# Action 1: Add nodes
# ''''''''''''''''''''''''''
#
# Given the graph embedding vector :math:`\textbf{h}_{G}`, evaluate
#
# .. math::
#
#    \text{Sigmoid}(\textbf{W}_{\text{add node}}\textbf{h}_{G}+b_{\text{add node}}),\\
#
# which is then used to parametrize a Bernoulli distribution for deciding whether
# to add a new node.
#
# If a new node is to be added, initialize its feature with
#
# .. math::
#
#    \textbf{W}_{\text{init}}\text{concat}([\textbf{h}_{\text{init}}, \textbf{h}_{G}])+\textbf{b}_{\text{init}},\\
#
# where :math:`\textbf{h}_{\text{init}}` is a learnable embedding module for
# untyped nodes.
#
Example #23
Source File: model_batch.py From dgl with Apache License 2.0
def bernoulli_action_log_prob(logit, action):
    """Calculate the log p of an action with respect to a Bernoulli
    distribution across a batch of actions. Use logit rather than prob
    for numerical stability."""
    log_probs = torch.cat([F.logsigmoid(-logit), F.logsigmoid(logit)], dim=1)
    return log_probs.gather(1, torch.tensor(action).unsqueeze(1))
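Here both branch values are materialized for every batch element, and gather then selects the one matching each action, yielding a [B, 1] result. A hedged usage sketch; the logits and actions below are illustrative:

import torch
import torch.nn.functional as F

logit = torch.randn(4, 1)  # one add-node logit per graph
action = [0, 1, 1, 0]      # batch of actions

log_probs = torch.cat([F.logsigmoid(-logit), F.logsigmoid(logit)], dim=1)  # [4, 2]
selected = log_probs.gather(1, torch.tensor(action).unsqueeze(1))          # [4, 1]
print(selected.shape)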
Example #24
Source File: model.py From dgl with Apache License 2.0
def bernoulli_action_log_prob(logit, action):
    """Calculate the log p of an action with respect to a Bernoulli
    distribution. Use logit rather than prob for numerical stability."""
    if action == 0:
        return F.logsigmoid(-logit)
    else:
        return F.logsigmoid(logit)
Example #25
Source File: totalvi.py From scVI with MIT License
def get_sample_scale(
    self,
    x: torch.Tensor,
    y: torch.Tensor,
    batch_index: Optional[torch.Tensor] = None,
    label: Optional[torch.Tensor] = None,
    n_samples: int = 1,
    transform_batch: Optional[int] = None,
    eps=0,
    normalize_pro=False,
    sample_bern=True,
    include_bg=False,
) -> torch.Tensor:
    """Returns tuple of gene and protein scales.

    These scales can also be transformed into a particular batch. This function
    is the core of differential expression.

    Parameters
    ----------
    transform_batch
        Int of batch to "transform" all cells into
    eps
        Prior count to add to protein normalized expression
        (Default value = 0)
    normalize_pro
        bool, whether to make protein expression sum to one in a cell
        (Default value = False)
    include_bg
        bool, whether to include the background component of expression
        (Default value = False)
    """
    outputs = self.inference(
        x,
        y,
        batch_index=batch_index,
        label=label,
        n_samples=n_samples,
        transform_batch=transform_batch,
    )
    px_ = outputs["px_"]
    py_ = outputs["py_"]
    protein_mixing = 1 / (1 + torch.exp(-py_["mixing"]))
    if sample_bern is True:
        protein_mixing = Bernoulli(protein_mixing).sample()
    pro_value = (1 - protein_mixing) * py_["rate_fore"]
    if include_bg is True:
        pro_value = (1 - protein_mixing) * py_["rate_fore"] + protein_mixing * py_["rate_back"]
    if normalize_pro is True:
        pro_value = torch.nn.functional.normalize(pro_value, p=1, dim=-1)

    return px_["scale"], pro_value + eps
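The Bernoulli here acts as a stochastic foreground/background switch: protein_mixing is the probability that a measurement is background, and sampling it yields a hard 0/1 mask that selects between the background and foreground rates. A stripped-down sketch of that mixture logic; the names mirror the snippet but the tensors are illustrative:

import torch
from torch.distributions import Bernoulli

mixing_logits = torch.randn(8, 14)             # [cells, proteins]
protein_mixing = torch.sigmoid(mixing_logits)  # P(background)

mask = Bernoulli(protein_mixing).sample()      # 1 = background, 0 = foreground
rate_fore = torch.rand(8, 14) + 1.0
rate_back = torch.rand(8, 14) * 0.1

foreground_only = (1 - mask) * rate_fore               # zero out background draws
with_background = foreground_only + mask * rate_back   # full mixture expression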
Example #26
Source File: model_batch.py From dgl with Apache License 2.0
def forward(self, g_list, a=None):
    """Decide if a new node should be added for each graph in the g_list.

    If a new node is added, initialize its node representations.
    Record graphs for which a new node is added.

    During training, the action is passed rather than made, and the log P
    of the action is recorded. During inference, the action is sampled
    from the modeled Bernoulli distribution.

    Parameters
    ----------
    g_list : list
        A list of dgl.DGLGraph objects
    a : None or list
        - During training, a is a list of integers specifying whether a
          new node should be added.
        - During inference, a is None.

    Returns
    -------
    g_non_stop : list
        List of indices specifying which graphs in the g_list have a new
        node added
    """
    # Graphs for which a node is added
    g_non_stop = []
    batch_graph_embed = self.graph_op['embed'](g_list)
    batch_logit = self.add_node(batch_graph_embed)
    batch_prob = torch.sigmoid(batch_logit)

    if not self.training:
        a = Bernoulli(batch_prob).sample().squeeze(1).tolist()

    for i, g in enumerate(g_list):
        action = a[i]
        stop = bool(action == self.stop)

        if not stop:
            g_non_stop.append(g.index)
            g.add_nodes(1)
            self._initialize_node_repr(g, action, batch_graph_embed[i:i+1, :])

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(batch_logit, a)
        self.log_prob.append(sample_log_prob)

    return g_non_stop
Example #27
Source File: restore.py From biva-pytorch with MIT License
def restore_session(logdir, device='auto'):
    """Load model from a saved session."""
    if logdir[-1] == '/':
        logdir = logdir[:-1]
    run_id = logdir.split('/')[-1]

    # load the hyperparameters and arguments
    hyperparameters = pickle.load(open(os.path.join(logdir, "hyperparameters.p"), "rb"))
    opt = json.load(open(os.path.join(logdir, "config.json")))

    # instantiate the model
    model = DeepVae(**hyperparameters)
    device = available_device() if device == 'auto' else device
    model.to(device)

    # load pretrained weights
    load_model(model, logdir)

    # define likelihood and evaluator
    likelihood = {'cifar10': DiscretizedMixtureLogits(opt['nr_mix']),
                  'binmnist': Bernoulli}[opt['dataset']]
    evaluator = VariationalInference(likelihood, iw_samples=1)

    # load the dataset (note: opt is a dict loaded from JSON, so it is
    # indexed with opt['data_root'] rather than attribute access)
    if opt['dataset'] == 'binmnist':
        train_dataset, valid_dataset, test_dataset = get_binmnist_datasets(opt['data_root'])
    elif opt['dataset'] == 'cifar10':
        from torchvision.transforms import Lambda
        transform = Lambda(lambda x: x * 2 - 1)
        train_dataset, valid_dataset, test_dataset = get_cifar10_datasets(opt['data_root'],
                                                                          transform=transform)
    else:
        raise NotImplementedError

    return {
        'model': model,
        'device': device,
        'run_id': run_id,
        'hyperparameters': hyperparameters,
        'opt': opt,
        'likelihood': likelihood,
        'evaluator': evaluator,
        'train_dataset': train_dataset,
        'valid_dataset': valid_dataset,
        'test_dataset': test_dataset,
    }