Python torch.int64() Examples
The following are 30 code examples of torch.int64(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
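torch.int64 (aliased as torch.long) is the dtype PyTorch expects for indices, labels, and embedding lookups, which is why it recurs throughout the examples below. A minimal sketch of the common ways to obtain it (not taken from any of the projects that follow):

import torch

t = torch.tensor([1.7, 2.3]).to(torch.int64)   # casting truncates toward zero -> tensor([1, 2])
z = torch.zeros(3, dtype=torch.int64)          # allocate int64 storage directly
idx = torch.arange(5)                          # integer factory functions default to int64
assert idx.dtype == torch.int64 == torch.long  # torch.long is the same dtype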
Example #1
Source File: predictor.py From R2CNN.pytorch with MIT License | 7 votes |
def overlay_boxes(self, image, predictions):
    """
    Adds the predicted boxes on top of the image

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `labels`.
    """
    labels = predictions.get_field("labels")
    boxes = predictions.bbox

    colors = self.compute_colors_for_labels(labels).tolist()

    for box, color in zip(boxes, colors):
        box = box.to(torch.int64)
        top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
        image = cv2.rectangle(
            image, tuple(top_left), tuple(bottom_right), tuple(color), 1
        )

    return image
Example #2
Source File: predictor.py From Res2Net-maskrcnn with MIT License | 7 votes |
def overlay_boxes(self, image, predictions):
    """
    Adds the predicted boxes on top of the image

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `labels`.
    """
    labels = predictions.get_field("labels")
    boxes = predictions.bbox

    colors = self.compute_colors_for_labels(labels).tolist()

    for box, color in zip(boxes, colors):
        box = box.to(torch.int64)
        top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
        image = cv2.rectangle(
            image, tuple(top_left), tuple(bottom_right), tuple(color), 1
        )

    return image
Example #3
Source File: loss.py From Parsing-R-CNN with MIT License | 6 votes |
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[max(0, ymin):ymax, max(0, xmin):xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64)
Example #4
Source File: functional.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def mu_law_encoding(
        x: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law encoding
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
    return x_mu
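As a quick illustration of the function above, encoding a toy signal already scaled to [-1, 1] — the sine wave and 256 channels here are illustrative choices, not part of the source:

import torch

waveform = torch.sin(torch.linspace(0, 6.28, 16))         # toy signal in [-1, 1]
encoded = mu_law_encoding(waveform, quantization_channels=256)
assert encoded.dtype == torch.int64                       # values fall in 0..255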
Example #5
Source File: DetectronModels.py From Clothing-Detection with GNU General Public License v3.0 | 6 votes |
def overlay_boxes(self, image, predictions):
    """
    Adds the predicted boxes on top of the image

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the field `labels`.
    """
    labels = predictions.get_field("labels")
    boxes = predictions.bbox

    colors = self.compute_colors_for_labels(labels).tolist()

    for box, color in zip(boxes, colors):
        box = box.to(torch.int64)
        top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
        image = cv2.rectangle(
            image, tuple(top_left), tuple(bottom_right), tuple(color), 1
        )

    return image
Example #6
Source File: utils.py From integrated-gradient-pytorch with MIT License | 6 votes |
def calculate_outputs_and_gradients(inputs, model, target_label_idx, cuda=False):
    # do the pre-processing
    predict_idx = None
    gradients = []
    for input in inputs:
        input = pre_processing(input, cuda)
        output = model(input)
        output = F.softmax(output, dim=1)
        if target_label_idx is None:
            target_label_idx = torch.argmax(output, 1).item()
        index = np.ones((output.size()[0], 1)) * target_label_idx
        index = torch.tensor(index, dtype=torch.int64)
        if cuda:
            index = index.cuda()
        output = output.gather(1, index)
        # clear grad
        model.zero_grad()
        output.backward()
        gradient = input.grad.detach().cpu().numpy()[0]
        gradients.append(gradient)
    gradients = np.array(gradients)
    return gradients, target_label_idx
Example #7
Source File: scene_graph_groundtruth.py From NSCL-PyTorch-Release with MIT License | 6 votes |
def __init__(self, vocab, used_concepts):
    super().__init__()
    self.vocab = vocab
    self.used_concepts = used_concepts
    self.output_dims = [None, 0, 4]

    self.register_buffer('global2local', torch.zeros(len(self.vocab), dtype=torch.int64))
    for k, v in self.used_concepts.items():
        if v['type'] != 'attribute':
            continue
        self.output_dims[1] += len(v['values'])

        v = v['values']
        self.register_buffer('local2global_{}'.format(k), torch.zeros(len(v), dtype=torch.int64))
        for i, vv in enumerate(v):
            self.global2local[vocab.word2idx[vv]] = i
            getattr(self, 'local2global_{}'.format(k))[i] = vocab.word2idx[vv]
Example #8
Source File: gplvm.py From pmf-automl with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, dim, X, y, kernel, variance=1.0, N_max=None):
    super(GP, self).__init__()
    self.dim = torch.tensor([dim], requires_grad=False)
    self.kernel = kernel
    self.variance = torch.nn.Parameter(
        transform_backward(torch.tensor([variance])))

    if torch.is_tensor(X):
        self.X = X
    else:
        self.X = torch.tensor(X, requires_grad=False, dtype=dtype)
    self.N_max = N_max
    self.N = self.X.size()[0]

    if isinstance(y, Sparse1DTensor):
        self.y = y
        ix = torch.tensor([k for k in y.ix.keys()], dtype=torch.int64)
        self.get_batch = BatchIndices(None, ix, self.N_max)
    else:  # NOTE: see (1)
        self.y = torch.tensor(y.squeeze(), dtype=dtype, requires_grad=False)
        self.get_batch = BatchIndices(self.N, None, self.N_max)
Example #9
Source File: types.py From chainer-compiler with MIT License | 6 votes |
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
        torch.bool    : np.dtype(np.bool),
        torch.uint8   : np.dtype(np.uint8),
        torch.int8    : np.dtype(np.int8),
        torch.int16   : np.dtype(np.int16),
        torch.short   : np.dtype(np.int16),
        torch.int32   : np.dtype(np.int32),
        torch.int     : np.dtype(np.int32),
        torch.int64   : np.dtype(np.int64),
        torch.long    : np.dtype(np.int64),
        torch.float16 : np.dtype(np.float16),
        torch.half    : np.dtype(np.float16),
        torch.float32 : np.dtype(np.float32),
        torch.float   : np.dtype(np.float32),
        torch.float64 : np.dtype(np.float64),
        torch.double  : np.dtype(np.float64),
    }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------
Example #10
Source File: repeat_factor.py From Parsing-R-CNN with MIT License | 6 votes |
def _get_epoch_indices(self, generator):
    """
    Create a list of dataset indices (with repeats) to use for one epoch.

    Args:
        generator (torch.Generator): pseudo random number generator used for
            stochastic rounding.

    Returns:
        torch.Tensor: list of dataset indices to use in one epoch. Each index
            is repeated based on its calculated repeat factor.
    """
    # Since repeat factors are fractional, we use stochastic rounding so
    # that the target repeat factor is achieved in expectation over the
    # course of training
    rands = torch.rand(len(self._frac_part), generator=generator)
    rep_factors = self._int_part + (rands < self._frac_part).float()

    # Construct a list of indices in which we repeat images as specified
    indices = []
    for dataset_index, rep_factor in enumerate(rep_factors):
        indices.extend([dataset_index] * int(rep_factor.item()))
    return torch.tensor(indices, dtype=torch.int64)
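Stochastic rounding here means a fractional repeat factor such as 2.3 is rounded up to 3 with probability 0.3 and down to 2 otherwise, so the expected repeat count matches the target. A minimal sketch of the same trick with made-up factors:

import torch

rep_factors = torch.tensor([2.3, 1.0, 0.5])
int_part, frac_part = rep_factors.floor(), rep_factors - rep_factors.floor()
g = torch.Generator().manual_seed(0)
rounded = int_part + (torch.rand(len(frac_part), generator=g) < frac_part).float()
# averaged over many draws, each entry of `rounded` converges to rep_factors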
Example #11
Source File: parsing.py From Parsing-R-CNN with MIT License | 6 votes |
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[ymin:ymax, xmin:xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64)
Example #12
Source File: relgraphconv.py From dgl with Apache License 2.0 | 6 votes |
def bdd_message_func(self, edges):
    """Message function for block-diagonal-decomposition regularizer"""
    if edges.src['h'].dtype == th.int64 and len(edges.src['h'].shape) == 1:
        raise TypeError('Block decomposition does not allow integer ID feature.')

    # calculate msg @ W_r before putting msg into edge
    if self.low_mem:
        etypes = th.unique(edges.data['type'])
        msg = th.empty((edges.src['h'].shape[0], self.out_feat),
                       device=edges.src['h'].device)
        for etype in etypes:
            loc = edges.data['type'] == etype
            w = self.weight[etype].view(self.num_bases, self.submat_in, self.submat_out)
            src = edges.src['h'][loc].view(-1, self.num_bases, self.submat_in)
            sub_msg = th.einsum('abc,bcd->abd', src, w)
            sub_msg = sub_msg.reshape(-1, self.out_feat)
            msg[loc] = sub_msg
    else:
        weight = self.weight.index_select(0, edges.data['type']).view(
            -1, self.submat_in, self.submat_out)
        node = edges.src['h'].view(-1, 1, self.submat_in)
        msg = th.bmm(node, weight).view(-1, self.out_feat)
    if 'norm' in edges.data:
        msg = msg * edges.data['norm']
    return {'msg': msg}
Example #13
Source File: planner.py From PlaNet with MIT License | 6 votes |
def forward(self, belief, state):
    B, H, Z = belief.size(0), belief.size(1), state.size(1)
    belief = belief.unsqueeze(dim=1).expand(B, self.candidates, H).reshape(-1, H)
    state = state.unsqueeze(dim=1).expand(B, self.candidates, Z).reshape(-1, Z)
    # Initialize factorized belief over action sequences q(a_t:t+H) ~ N(0, I)
    action_mean = torch.zeros(self.planning_horizon, B, 1, self.action_size, device=belief.device)
    action_std_dev = torch.ones(self.planning_horizon, B, 1, self.action_size, device=belief.device)
    for _ in range(self.optimisation_iters):
        # Evaluate J action sequences from the current belief (over entire sequence at once, batched over particles)
        actions = (action_mean + action_std_dev * torch.randn(
            self.planning_horizon, B, self.candidates, self.action_size,
            device=action_mean.device)
        ).view(self.planning_horizon, B * self.candidates, self.action_size)  # Sample actions (time x (batch x candidates) x actions)
        actions.clamp_(min=self.min_action, max=self.max_action)  # Clip action range
        # Sample next states
        beliefs, states, _, _ = self.transition_model(state, actions, belief)
        # Calculate expected returns (technically sum of rewards over planning horizon)
        returns = self.reward_model(beliefs.view(-1, H), states.view(-1, Z)).view(self.planning_horizon, -1).sum(dim=0)
        # Re-fit belief to the K best action sequences
        _, topk = returns.reshape(B, self.candidates).topk(self.top_candidates, dim=1, largest=True, sorted=False)
        topk += self.candidates * torch.arange(0, B, dtype=torch.int64, device=topk.device).unsqueeze(dim=1)  # Fix indices for unrolled actions
        best_actions = actions[:, topk.view(-1)].reshape(self.planning_horizon, B, self.top_candidates, self.action_size)
        # Update belief with new means and standard deviations
        action_mean = best_actions.mean(dim=2, keepdim=True)
        action_std_dev = best_actions.std(dim=2, unbiased=False, keepdim=True)
    # Return first action mean µ_t
    return action_mean[0].squeeze(dim=1)
Example #14
Source File: utils.py From torchbench with Apache License 2.0 | 5 votes |
def update(self, a, b):
    n = self.num_classes
    if self.mat is None:
        self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
    with torch.no_grad():
        k = (a >= 0) & (a < n)
        inds = n * a[k].to(torch.int64) + b[k]
        self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
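The update flattens each (target, prediction) pair into the single index n * a + b so that one torch.bincount call accumulates the whole confusion matrix. A small check with made-up labels:

import torch

n = 3
target = torch.tensor([0, 1, 2, 2])
pred = torch.tensor([0, 2, 2, 1])
inds = n * target.to(torch.int64) + pred
conf_mat = torch.bincount(inds, minlength=n ** 2).reshape(n, n)
# conf_mat[i, j] counts samples of true class i predicted as class j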
Example #15
Source File: dqn.py From safelife with Apache License 2.0 | 5 votes |
def __init__(self, capacity, num_env, n_step, gamma):
    self.capacity = capacity
    self.idx = 0
    self.states = np.zeros(capacity, dtype=object)
    self.actions = np.zeros(capacity, dtype=np.int64)
    self.rewards = np.zeros(capacity, dtype=np.float32)
    self.done = np.zeros(capacity, dtype=bool)
    self.num_env = num_env
    self.n_step = n_step
    self.gamma = gamma
    self.tail_length = n_step * num_env
Example #16
Source File: poolers.py From Clothing-Detection with GNU General Public License v3.0 | 5 votes |
def __call__(self, boxlists):
    """
    Arguments:
        boxlists (list[BoxList])
    """
    # Compute level ids
    s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))

    # Eqn.(1) in FPN paper
    target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
    target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
    return target_lvls.to(torch.int64) - self.k_min
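For reference, Eqn.(1) of the FPN paper assigns an RoI of area wh to pyramid level k = ⌊k_0 + log2(√(wh) / 224)⌋, where k_0 is the level a canonical 224×224 RoI maps to; self.lvl0 and self.s0 above play the roles of k_0 and the canonical scale 224, and self.eps guards the log2 against zero-area boxes.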
Example #17
Source File: loss.py From Clothing-Detection with GNU General Public License v3.0 | 5 votes |
def prepare_targets(self, proposals, targets):
    labels = []
    regression_targets = []
    for proposals_per_image, targets_per_image in zip(proposals, targets):
        matched_targets = self.match_targets_to_proposals(
            proposals_per_image, targets_per_image
        )
        matched_idxs = matched_targets.get_field("matched_idxs")

        labels_per_image = matched_targets.get_field("labels")
        labels_per_image = labels_per_image.to(dtype=torch.int64)

        # Label background (below the low threshold)
        bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
        labels_per_image[bg_inds] = 0

        # Label ignore proposals (between low and high thresholds)
        ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
        labels_per_image[ignore_inds] = -1  # -1 is ignored by sampler

        # compute regression targets
        regression_targets_per_image = self.box_coder.encode(
            matched_targets.bbox, proposals_per_image.bbox
        )

        labels.append(labels_per_image)
        regression_targets.append(regression_targets_per_image)

    return labels, regression_targets
Example #18
Source File: dqn.py From safelife with Apache License 2.0 | 5 votes |
def optimize(self, report=False):
    if len(self.replay_buffer) < self.replay_initial:
        return

    state, action, reward, next_state, done = \
        self.replay_buffer.sample(self.training_batch_size)

    state = torch.tensor(state, device=self.compute_device, dtype=torch.float32)
    next_state = torch.tensor(next_state, device=self.compute_device, dtype=torch.float32)
    action = torch.tensor(action, device=self.compute_device, dtype=torch.int64)
    reward = torch.tensor(reward, device=self.compute_device, dtype=torch.float32)
    done = torch.tensor(done, device=self.compute_device, dtype=torch.float32)

    q_values = self.training_model(state)
    next_q_values = self.target_model(next_state).detach()

    q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
    next_q_value, next_action = next_q_values.max(1)
    discount = self.gamma**self.multi_step_learning * (1 - done)
    expected_q_value = reward + discount * next_q_value

    loss = torch.mean((q_value - expected_q_value)**2)

    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    if report and self.data_logger is not None:
        data = {
            "loss": loss.item(),
            "epsilon": self.epsilon,
            "q_model_mean": q_values.mean().item(),
            "q_model_max": q_values.max(1)[0].mean().item(),
            "q_target_mean": next_q_values.mean().item(),
            "q_target_max": next_q_value.mean().item(),
        }
        logger.info(
            "n=%i: loss=%0.3g, q_mean=%0.3g, q_max=%0.3g",
            self.num_steps, data['loss'],
            data['q_model_mean'], data['q_model_max'])
        self.data_logger.log_scalars(data, self.num_steps, 'dqn')
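The quantity being regressed to here is the n-step TD target r + γⁿ · (1 − done) · max_a′ Q_target(s′, a′), with n given by self.multi_step_learning; the (1 − done) factor zeroes the bootstrap term at episode boundaries, and the int64 action tensor is what allows the gather() lookup of each state's chosen Q-value.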
Example #19
Source File: utils.py From dgl with Apache License 2.0 | 5 votes |
def matmul_maybe_select(A, B):
    """Perform Matrix multiplication C = A * B but A could be an integer id vector.

    If A is an integer vector, we treat it as multiplying a one-hot encoded tensor.
    In this case, the expensive dense matrix multiply can be replaced by a much
    cheaper index lookup.

    For example,
    ::

        A = [2, 0, 1],
        B = [[0.1, 0.2],
             [0.3, 0.4],
             [0.5, 0.6]]

    then matmul_maybe_select(A, B) is equivalent to
    ::

        [[0, 0, 1],    [[0.1, 0.2],
         [1, 0, 0],  *  [0.3, 0.4],
         [0, 1, 0]]     [0.5, 0.6]]

    In all other cases, perform a normal matmul.

    Parameters
    ----------
    A : torch.Tensor
        lhs tensor
    B : torch.Tensor
        rhs tensor

    Returns
    -------
    C : torch.Tensor
        result tensor
    """
    if A.dtype == th.int64 and len(A.shape) == 1:
        return B.index_select(0, A)
    else:
        return th.matmul(A, B)
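A quick check of the int64 fast path against the dense one-hot equivalent, using the same illustrative values as the docstring:

import torch as th

A = th.tensor([2, 0, 1])                                  # integer id vector
B = th.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
out = matmul_maybe_select(A, B)                           # takes the index_select path
dense = th.matmul(th.nn.functional.one_hot(A, 3).float(), B)
assert th.allclose(out, dense)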
Example #20
Source File: layers.py From dgl with Apache License 2.0 | 5 votes |
def _init_input_modules(g, ntype, textset, hidden_dims):
    # We initialize the linear projections of each input feature ``x`` as
    # follows:
    # * If ``x`` is a scalar integral feature, we assume that ``x`` is a categorical
    #   feature, and assume the range of ``x`` is 0..max(x).
    # * If ``x`` is a float one-dimensional feature, we assume that ``x`` is a
    #   numeric vector.
    # * If ``x`` is a field of a textset, we process it as bag of words.
    module_dict = nn.ModuleDict()

    for column, data in g.nodes[ntype].data.items():
        if column == dgl.NID:
            continue
        if data.dtype == torch.float32:
            assert data.ndim == 2
            m = nn.Linear(data.shape[1], hidden_dims)
            nn.init.xavier_uniform_(m.weight)
            nn.init.constant_(m.bias, 0)
            module_dict[column] = m
        elif data.dtype == torch.int64:
            assert data.ndim == 1
            m = nn.Embedding(
                data.max() + 2, hidden_dims, padding_idx=-1)
            nn.init.xavier_uniform_(m.weight)
            module_dict[column] = m

    if textset is not None:
        for column, field in textset.fields.items():
            if field.vocab.vectors:
                module_dict[column] = BagOfWordsPretrained(field, hidden_dims)
            else:
                module_dict[column] = BagOfWords(field, hidden_dims)

    return module_dict
Example #21
Source File: sampler.py From mars with Apache License 2.0 | 5 votes |
def __iter__(self):
    n = len(self.data_source)
    if self.replacement:  # pragma: no cover
        indices = torch.randint(high=n, size=(self.num_samples,),
                                dtype=torch.int64).tolist()
        self.data_source.prefetch(indices)
        return iter(indices)
    else:
        indices = torch.randperm(n).tolist()
        self.data_source.prefetch(indices)
        return iter(indices)
Example #22
Source File: dropblock.py From PyTorch-Encoding with MIT License | 5 votes |
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                          missing_keys, unexpected_keys, error_msgs):
    idx_key = prefix + 'i'
    drop_prob_key = prefix + 'drop_prob'
    # Provide defaults for buffers missing from older checkpoints
    if idx_key not in state_dict:
        state_dict[idx_key] = torch.zeros(1, dtype=torch.int64)
    if drop_prob_key not in state_dict:  # the source reads `idx_key not in drop_prob_key`, an apparent typo
        state_dict[drop_prob_key] = torch.ones(1, dtype=torch.float32)
    super(DropBlock2D, self)._load_from_state_dict(
        state_dict, prefix, local_metadata, strict,
        missing_keys, unexpected_keys, error_msgs)
Example #23
Source File: utils.py From latent-treelstm with MIT License | 5 votes |
def length_to_mask(length):
    with torch.no_grad():
        batch_size = length.shape[0]
        max_length = length.data.max()
        range = torch.arange(max_length, dtype=torch.int64, device=length.device)
        range_expanded = range[None, :].expand(batch_size, max_length)
        length_expanded = length[:, None].expand_as(range_expanded)
        return (range_expanded < length_expanded).float()
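For instance, lengths [3, 1, 2] yield a (3, 3) float mask — a quick illustrative check:

import torch

mask = length_to_mask(torch.tensor([3, 1, 2]))
# tensor([[1., 1., 1.],
#         [1., 0., 0.],
#         [1., 1., 0.]])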
Example #24
Source File: training.py From Self-Supervised-Gans-Pytorch with MIT License | 5 votes |
def _generator_train_iteration(self, generated_data, batch_size):
    """ """
    self.G_opt.zero_grad()

    # Calculate loss and optimize
    _, g_fake_pro_logits, g_fake_rot_logits, g_fake_rot_prob = self.D(generated_data)
    g_loss = - torch.sum(g_fake_pro_logits)

    # add auxiliary rotation loss
    rot_labels = torch.zeros(4*batch_size,).cuda()
    for i in range(4*batch_size):
        if i < batch_size:
            rot_labels[i] = 0
        elif i < 2*batch_size:
            rot_labels[i] = 1
        elif i < 3*batch_size:
            rot_labels[i] = 2
        else:
            rot_labels[i] = 3

    rot_labels = F.one_hot(rot_labels.to(torch.int64), 4).float()
    g_fake_class_loss = torch.sum(F.binary_cross_entropy_with_logits(
        input=g_fake_rot_logits,
        target=rot_labels))

    g_loss += self.weight_rotation_loss_g * g_fake_class_loss
    g_loss.backward(retain_graph=True)
    self.G_opt.step()

    # Record loss
    self.losses['G'].append(g_loss.data)
Example #25
Source File: training.py From Self-Supervised-Gans-Pytorch with MIT License | 5 votes |
def _critic_train_iteration(self, data, generated_data, batch_size):
    """ """
    # Calculate probabilities on real and generated data
    data = Variable(data)
    if self.use_cuda:
        data = data.cuda()
    _, d_real_pro_logits, d_real_rot_logits, d_real_rot_prob = self.D(data)
    _, g_fake_pro_logits, g_fake_rot_logits, g_fake_rot_prob = self.D(generated_data)

    # Get gradient penalty
    gradient_penalty = self._gradient_penalty(data, generated_data)
    self.losses['GP'].append(gradient_penalty.data)

    # Create total loss and optimize
    self.D_opt.zero_grad()
    d_loss = torch.sum(g_fake_pro_logits) - torch.sum(d_real_pro_logits) + gradient_penalty

    # Add auxiliary rotation loss
    rot_labels = torch.zeros(4*batch_size).cuda()
    for i in range(4*batch_size):
        if i < batch_size:
            rot_labels[i] = 0
        elif i < 2*batch_size:
            rot_labels[i] = 1
        elif i < 3*batch_size:
            rot_labels[i] = 2
        else:
            rot_labels[i] = 3

    rot_labels = F.one_hot(rot_labels.to(torch.int64), 4).float()
    d_real_class_loss = torch.sum(F.binary_cross_entropy_with_logits(
        input=d_real_rot_logits,
        target=rot_labels))

    d_loss += self.weight_rotation_loss_d * d_real_class_loss
    d_loss.backward(retain_graph=True)
    self.D_opt.step()

    # Record loss
    self.losses['D'].append(d_loss.data)
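In both training iterations above, the label-building loop assigns rotation class i to the i-th block of batch_size samples. The same labels can be built without a Python loop — a sketch, assuming the four rotated copies are stacked batch-block by batch-block as in the source:

import torch
import torch.nn.functional as F

batch_size = 8
rot_labels = torch.arange(4).repeat_interleave(batch_size)   # [0]*bs + [1]*bs + [2]*bs + [3]*bs
rot_targets = F.one_hot(rot_labels.to(torch.int64), 4).float()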
Example #26
Source File: off.py From pytorch_geometric with MIT License | 5 votes |
def face_to_tri(face):
    face = [[int(x) for x in line.strip().split()] for line in face]

    triangle = torch.tensor([line[1:] for line in face if line[0] == 3])
    triangle = triangle.to(torch.int64)

    rect = torch.tensor([line[1:] for line in face if line[0] == 4])
    rect = rect.to(torch.int64)

    if rect.numel() > 0:
        first, second = rect[:, [0, 1, 2]], rect[:, [1, 2, 3]]
        return torch.cat([triangle, first, second], dim=0).t().contiguous()
    else:
        return triangle.t().contiguous()
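Each quad row [4, a, b, c, d] is split into the triangles (a, b, c) and (b, c, d). A toy check with one triangle and one quad (illustrative OFF-style rows):

import torch

face_lines = ["3 0 1 2", "4 2 3 4 5"]
print(face_to_tri(face_lines))
# tensor([[0, 2, 3],
#         [1, 3, 4],
#         [2, 4, 5]])   # columns: the original triangle, then the quad's two halves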
Example #27
Source File: ptBEV.py From PolarSeg with BSD 3-Clause "New" or "Revised" License | 5 votes |
def grp_range_torch(a, dev):
    idx = torch.cumsum(a, 0)
    id_arr = torch.ones(idx[-1], dtype=torch.int64, device=dev)
    id_arr[0] = 0
    id_arr[idx[:-1]] = -a[:-1] + 1
    return torch.cumsum(id_arr, 0)
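The function returns a concatenated 0..k-1 range for each group size in a, which is what lets PolarSeg rank points within their grid cell. A quick check with an illustrative input:

import torch

sizes = torch.tensor([3, 2, 4])
print(grp_range_torch(sizes, torch.device('cpu')))
# tensor([0, 1, 2, 0, 1, 0, 1, 2, 3])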
Example #28
Source File: visualize_flow.py From residual-flows with MIT License | 5 votes |
def plt_flow_density(prior_logdensity, inverse_transform, ax, npts=100, memory=100,
                     title="$q(x)$", device="cpu"):
    side = np.linspace(LOW, HIGH, npts)
    xx, yy = np.meshgrid(side, side)
    x = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])

    x = torch.from_numpy(x).type(torch.float32).to(device)
    zeros = torch.zeros(x.shape[0], 1).to(x)

    z, delta_logp = [], []
    inds = torch.arange(0, x.shape[0]).to(torch.int64)
    for ii in torch.split(inds, int(memory**2)):
        z_, delta_logp_ = inverse_transform(x[ii], zeros[ii])
        z.append(z_)
        delta_logp.append(delta_logp_)
    z = torch.cat(z, 0)
    delta_logp = torch.cat(delta_logp, 0)

    logpz = prior_logdensity(z).view(z.shape[0], -1).sum(1, keepdim=True)  # logp(z)
    logpx = logpz - delta_logp

    px = np.exp(logpx.cpu().numpy()).reshape(npts, npts)

    ax.imshow(px, cmap='inferno')
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
Example #29
Source File: visualize_flow.py From residual-flows with MIT License | 5 votes |
def plt_flow_samples(prior_sample, transform, ax, npts=100, memory=100,
                     title="$x ~ q(x)$", device="cpu"):
    z = prior_sample(npts * npts, 2).type(torch.float32).to(device)
    zk = []
    inds = torch.arange(0, z.shape[0]).to(torch.int64)
    for ii in torch.split(inds, int(memory**2)):
        zk.append(transform(z[ii]))
    zk = torch.cat(zk, 0).cpu().numpy()
    ax.hist2d(zk[:, 0], zk[:, 1], range=[[LOW, HIGH], [LOW, HIGH]], bins=npts, cmap='inferno')
    ax.invert_yaxis()
    ax.get_xaxis().set_ticks([])
    ax.get_yaxis().set_ticks([])
    ax.set_title(title)
Example #30
Source File: dropblock.py From PyTorch-Encoding with MIT License | 5 votes |
def __init__(self, drop_prob, block_size, share_channel=False):
    super(DropBlock2D, self).__init__()
    self.register_buffer('i', torch.zeros(1, dtype=torch.int64))
    self.register_buffer('drop_prob', drop_prob * torch.ones(1, dtype=torch.float32))
    self.inited = False
    self.step_size = 0.0
    self.start_step = 0
    self.nr_steps = 0
    self.block_size = block_size
    self.share_channel = share_channel