Python torch.nn.functional.adaptive_avg_pool2d() Examples
The following are 30 code examples of torch.nn.functional.adaptive_avg_pool2d(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
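As a quick orientation before the examples, here is a minimal sketch of the call itself: F.adaptive_avg_pool2d(input, output_size) averages an N x C x H x W tensor down to the requested spatial size, choosing the pooling windows automatically; output_size may be an int or an (H, W) tuple, and a size of 1 gives global average pooling. The tensor shapes below are arbitrary.

import torch
import torch.nn.functional as F

x = torch.randn(8, 64, 37, 53)           # arbitrary N x C x H x W feature map

gap = F.adaptive_avg_pool2d(x, 1)         # global average pooling -> (8, 64, 1, 1)
grid = F.adaptive_avg_pool2d(x, (4, 4))   # fixed 4x4 output grid  -> (8, 64, 4, 4)

print(gap.shape, grid.shape)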
Example #1
Source File: htc_roi_head.py From mmdetection with Apache License 2.0 | 6 votes |
def _bbox_forward(self, stage, x, rois, semantic_feat=None):
    """Box head forward function used in both training and testing."""
    bbox_roi_extractor = self.bbox_roi_extractor[stage]
    bbox_head = self.bbox_head[stage]
    bbox_feats = bbox_roi_extractor(
        x[:len(bbox_roi_extractor.featmap_strides)], rois)

    if self.with_semantic and 'bbox' in self.semantic_fusion:
        bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                         rois)
        if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
            bbox_semantic_feat = F.adaptive_avg_pool2d(
                bbox_semantic_feat, bbox_feats.shape[-2:])
        bbox_feats += bbox_semantic_feat

    cls_score, bbox_pred = bbox_head(bbox_feats)

    bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
    return bbox_results
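The example above pools the semantic features down to the spatial size of the RoI features before an element-wise sum. A minimal, self-contained sketch of that fusion pattern follows; the tensor shapes are illustrative and not taken from mmdetection.

import torch
import torch.nn.functional as F

roi_feats = torch.randn(4, 256, 7, 7)         # RoI-aligned box features (illustrative)
semantic_feats = torch.randn(4, 256, 14, 14)  # larger semantic feature map (illustrative)

# Match spatial sizes, then fuse by element-wise addition.
if semantic_feats.shape[-2:] != roi_feats.shape[-2:]:
    semantic_feats = F.adaptive_avg_pool2d(semantic_feats, roi_feats.shape[-2:])

fused = roi_feats + semantic_feats            # shapes now match: (4, 256, 7, 7)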
Example #2
Source File: grad_cam.py From grad-cam-pytorch with MIT License | 6 votes |
def generate(self, target_layer):
    fmaps = self._find(self.fmap_pool, target_layer)
    grads = self._find(self.grad_pool, target_layer)
    weights = F.adaptive_avg_pool2d(grads, 1)

    gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
    gcam = F.relu(gcam)
    gcam = F.interpolate(
        gcam, self.image_shape, mode="bilinear", align_corners=False
    )

    B, C, H, W = gcam.shape
    gcam = gcam.view(B, -1)
    gcam -= gcam.min(dim=1, keepdim=True)[0]
    gcam /= gcam.max(dim=1, keepdim=True)[0]
    gcam = gcam.view(B, C, H, W)

    return gcam
Example #3
Source File: multiscale.py From torchsupport with MIT License | 6 votes |
def forward(self, input):
    upsize = tuple(input.size()[-2:])
    if self.is_module:
        out = self.modules(input)
        out = func.adaptive_avg_pool2d(input, 1)
        out = func.interpolate(out, size=upsize)
        for idx, output in enumerate(self.outputs):
            self.outputs[idx] = func.interpolate(output, size=upsize, mode='bilinear')
        outputs = self.outputs + [out]
        self.outputs = []
    else:
        out = input
        outputs = []
        for idx, module in enumerate(self.modules):
            out = module(outputs)
            if idx in self.branch:
                outputs.append(out)
        out = func.adaptive_avg_pool2d(input, 1)
        out = func.interpolate(out, size=upsize)
        outputs.append(out)
    if self.refine:
        for idx, output in enumerate(outputs):
            if idx < len(outputs) - 1:
                outputs[idx] = self.attention_refinements(idx) * output
    return torch.cat(outputs, dim=1)
Example #4
Source File: clustering.py From torchsupport with MIT License | 6 votes |
def expectation(self):
    self.net.eval()
    with torch.no_grad():
        embedding = []
        batch_loader = DataLoader(
            self.data, batch_size=self.batch_size, shuffle=False
        )
        for point, *_ in batch_loader:
            features, mean, logvar = self.net(point.to(self.device))
            std = torch.exp(0.5 * logvar)
            sample = torch.randn_like(std).mul(std).add_(mean)
            latent_point = func.adaptive_avg_pool2d(sample, 1)
            latent_point = latent_point.reshape(latent_point.size(0), -1)
            embedding.append(latent_point)
        embedding = torch.cat(embedding, dim=0)
        expectation = self.classifier(embedding)
    self.net.train()
    return expectation.to("cpu"), embedding.to("cpu")
Example #5
Source File: conditional_mnist_score_classifier.py From torchsupport with MIT License | 6 votes |
def forward(self, inputs, noise):
    out = self.input(inputs)
    cond = torch.zeros(
        inputs.size(0), 10,
        dtype=inputs.dtype, device=inputs.device
    )
    offset = (torch.log(noise) / torch.log(torch.tensor(0.60))).long()
    cond[torch.arange(inputs.size(0)), offset.view(-1)] = 1
    connections = []
    for norm, block in zip(self.down_norm, self.down):
        out = func.elu(block(norm(out, cond)))
        connections.append(out)
    features = func.adaptive_avg_pool2d(out, 1)
    logits = self.predict(features.view(features.size(0), -1))
    for norm, block, shortcut in zip(self.up_norm, self.up, reversed(connections)):
        out = func.elu(block(norm(torch.cat((out, shortcut), dim=1), cond)))
    del connections
    return self.output(out), logits
Example #6
Source File: util.py From swiftnet with GNU General Public License v3.0 | 6 votes |
def forward(self, x):
    levels = []
    target_size = self.fixed_size if self.fixed_size is not None else x.size()[2:4]
    ar = target_size[1] / target_size[0]

    x = self.spp[0].forward(x)
    levels.append(x)
    num = len(self.spp) - 1

    for i in range(1, num):
        if not self.square_grid:
            grid_size = (self.grids[i - 1], max(1, round(ar * self.grids[i - 1])))
            x_pooled = F.adaptive_avg_pool2d(x, grid_size)
        else:
            x_pooled = F.adaptive_avg_pool2d(x, self.grids[i - 1])
        level = self.spp[i].forward(x_pooled)
        level = self.upsampling_method(level, target_size)
        levels.append(level)

    x = torch.cat(levels, 1)
    x = self.spp[-1].forward(x)
    return x
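Here adaptive_avg_pool2d supplies the pooling step of a spatial pyramid: the input is pooled to several coarse grids, each level is transformed and upsampled back to the input resolution, and all levels are concatenated. A stripped-down sketch of that idea, assuming bilinear upsampling and a plain 1x1 convolution per level (both are assumptions made for illustration, not SwiftNet's actual modules):

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySPP(nn.Module):
    """Toy spatial pyramid pooling block (illustrative only)."""
    def __init__(self, channels, grids=(8, 4, 2, 1)):
        super().__init__()
        self.grids = grids
        self.convs = nn.ModuleList(
            nn.Conv2d(channels, channels, kernel_size=1) for _ in grids
        )

    def forward(self, x):
        target_size = x.shape[-2:]
        levels = [x]
        for grid, conv in zip(self.grids, self.convs):
            pooled = F.adaptive_avg_pool2d(x, grid)   # pool to a coarse grid
            level = conv(pooled)                      # per-level transform
            level = F.interpolate(level, size=target_size,
                                  mode='bilinear', align_corners=False)
            levels.append(level)
        return torch.cat(levels, dim=1)

spp = TinySPP(64)
out = spp(torch.randn(2, 64, 32, 48))   # -> (2, 64 * 5, 32, 48)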
Example #7
Source File: googlenet.py From Deep-Metric-Learning-Baselines with Apache License 2.0 | 6 votes |
def forward(self, x):
    # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
    x = F.adaptive_avg_pool2d(x, (4, 4))
    # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
    x = self.conv(x)
    # N x 128 x 4 x 4
    x = x.view(x.size(0), -1)
    # N x 2048
    x = F.relu(self.fc1(x), inplace=True)
    # N x 2048
    x = F.dropout(x, 0.7, training=self.training)
    # N x 2048
    x = self.fc2(x)
    # N x 1024
    return x
Example #8
Source File: resnet.py From kaggle-human-protein-atlas-image-classification with Apache License 2.0 | 6 votes |
def forward(self, x):
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)

    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(self.dropout(x))

    feat = x
    x = self.out(x)
    x = F.adaptive_avg_pool2d(x, (1, 1))
    x = torch.sigmoid(x)  # relu + tanh? thresholded?
    x = torch.squeeze(x)
    return {'logit': x, 'feat': feat}
Example #9
Source File: hrnet.py From Parsing-R-CNN with MIT License | 6 votes |
def forward(self, x):
    assert len(self.branches) == len(x)

    x = [branch(b) for branch, b in zip(self.branches, x)]

    if self.use_global:
        x_global = [F.adaptive_avg_pool2d(b, 1) for b in x]
        x_global = torch.cat(tuple(x_global), 1)

    x_fused = []
    for i in range(len(self.fuse_layers)):
        for j in range(0, len(self.branches)):
            if j == 0:
                x_fused.append(self.fuse_layers[i][0](x[0]))
            else:
                x_fused[i] = x_fused[i] + self.fuse_layers[i][j](x[j])
        if self.use_global:
            x_fused[i] = x_fused[i] * self.global_layers[i](x_global)

    for i in range(len(x_fused)):
        x_fused[i] = self.relu(x_fused[i])

    return x_fused
Example #10
Source File: resnet_utils.py From AAT with MIT License | 6 votes |
def forward(self, img, att_size=14):
    x = img.unsqueeze(0)

    x = self.resnet.conv1(x)
    x = self.resnet.bn1(x)
    x = self.resnet.relu(x)
    x = self.resnet.maxpool(x)

    x = self.resnet.layer1(x)
    x = self.resnet.layer2(x)
    x = self.resnet.layer3(x)
    x = self.resnet.layer4(x)

    fc = x.mean(3).mean(2).squeeze()
    att = F.adaptive_avg_pool2d(x, [att_size, att_size]).squeeze().permute(1, 2, 0)

    return fc, att
Example #11
Source File: sononet.py From Attention-Gated-Networks with MIT License | 6 votes |
def forward(self, inputs):
    # Feature Extraction
    conv1 = self.conv1(inputs)
    maxpool1 = self.maxpool1(conv1)

    conv2 = self.conv2(maxpool1)
    maxpool2 = self.maxpool2(conv2)

    conv3 = self.conv3(maxpool2)
    maxpool3 = self.maxpool3(conv3)

    conv4 = self.conv4(maxpool3)
    maxpool4 = self.maxpool4(conv4)

    conv5 = self.conv5(maxpool4)
    conv5_p = self.conv5_p(conv5)
    conv6_p = self.conv6_p(conv5_p)

    batch_size = inputs.shape[0]
    pooled = F.adaptive_avg_pool2d(conv6_p, (1, 1)).view(batch_size, -1)

    return pooled
Example #12
Source File: SwiftNet.py From Fast_Seg with Apache License 2.0 | 6 votes |
def forward(self, x):
    levels = []
    target_size = x.size()[2:4]
    ar = target_size[1] / target_size[0]

    x = self.spp[0].forward(x)
    levels.append(x)
    num = len(self.spp) - 1

    for i in range(1, num):
        if not self.square_grid:
            grid_size = (self.grids[i - 1], max(1, round(ar * self.grids[i - 1])))
            x_pooled = F.adaptive_avg_pool2d(x, grid_size)
        else:
            x_pooled = F.adaptive_avg_pool2d(x, self.grids[i - 1])
        level = self.spp[i].forward(x_pooled)
        level = upsample(level, target_size)
        levels.append(level)

    x = torch.cat(levels, 1)
    x = self.spp[-1].forward(x)
    return x
Example #13
Source File: conditional_mnist_score_classifier.py From torchsupport with MIT License | 5 votes |
def forward(self, inputs, noise, *args):
    with torch.enable_grad():
        make_differentiable(inputs)
        cond = torch.zeros(
            inputs.size(0), 10,
            dtype=inputs.dtype, device=inputs.device
        )
        offset = (torch.log(noise) / torch.log(torch.tensor(0.60))).long()
        cond[torch.arange(inputs.size(0)), offset.view(-1)] = 1
        out = self.preprocess(inputs)
        count = 0
        for bn, proj, block in zip(self.bn, self.project, self.blocks):
            out = func.elu(bn(proj(out) + block(out), cond))
            count += 1
            if count % 5 == 0:
                out = func.avg_pool2d(out, 2)
        out = self.postprocess(out)
        out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
        logits = self.predict(out)
        energy = -logits.logsumexp(dim=1)
        score = -torch.autograd.grad(
            energy, inputs, torch.ones_like(energy),
            create_graph=True, retain_graph=True
        )[0]
    return score, logits
Example #14
Source File: htc.py From mmdetection-annotated with Apache License 2.0 | 5 votes |
def _bbox_forward_train(self,
                        stage,
                        x,
                        sampling_results,
                        gt_bboxes,
                        gt_labels,
                        rcnn_train_cfg,
                        semantic_feat=None):
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_roi_extractor = self.bbox_roi_extractor[stage]
    bbox_head = self.bbox_head[stage]
    bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
                                    rois)
    # semantic feature fusion
    # element-wise sum for original features and pooled semantic features
    if self.with_semantic and 'bbox' in self.semantic_fusion:
        bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                         rois)
        if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
            bbox_semantic_feat = F.adaptive_avg_pool2d(
                bbox_semantic_feat, bbox_feats.shape[-2:])
        bbox_feats += bbox_semantic_feat

    cls_score, bbox_pred = bbox_head(bbox_feats)

    bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
                                        gt_labels, rcnn_train_cfg)
    loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
    return loss_bbox, rois, bbox_targets, bbox_pred
Example #15
Source File: model.py From fast-autoaugment with MIT License | 5 votes |
def forward(self, inputs, drop_connect_rate=None):
    """
    :param inputs: input tensor
    :param drop_connect_rate: drop connect rate (float, between 0 and 1)
    :return: output of block
    """
    if self._is_condconv():
        feat = F.adaptive_avg_pool2d(inputs, 1).flatten(1)
        routing_w = torch.sigmoid(self.routing_fn(feat))
        if self._block_args.expand_ratio != 1:
            _expand_conv = partial(self._expand_conv, routing_weights=routing_w)
        _depthwise_conv = partial(self._depthwise_conv, routing_weights=routing_w)
        _project_conv = partial(self._project_conv, routing_weights=routing_w)
    else:
        if self._block_args.expand_ratio != 1:
            _expand_conv = self._expand_conv
        _depthwise_conv, _project_conv = self._depthwise_conv, self._project_conv

    # Expansion and Depthwise Convolution
    x = inputs
    if self._block_args.expand_ratio != 1:
        x = self._swish(self._bn0(_expand_conv(inputs)))
    x = self._swish(self._bn1(_depthwise_conv(x)))

    # Squeeze and Excitation
    if self.has_se:
        x_squeezed = F.adaptive_avg_pool2d(x, 1)
        x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
        x = torch.sigmoid(x_squeezed) * x

    x = self._bn2(_project_conv(x))

    # Skip connection and drop connect
    input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
    if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
        if drop_connect_rate:
            x = drop_connect(x, drop_p=drop_connect_rate, training=self.training)
        x = x + inputs  # skip connection
    return x
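The squeeze-and-excitation branch in this block is a common use of global adaptive pooling: pool the feature map to 1x1, pass it through a small bottleneck, and rescale the channels with a sigmoid gate. A minimal sketch of just that branch (the class name and layer sizes are illustrative, not the project's exact implementation):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SqueezeExcite(nn.Module):
    """Minimal squeeze-and-excitation gate (illustrative sizes)."""
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.reduce = nn.Conv2d(channels, channels // reduction, kernel_size=1)
        self.expand = nn.Conv2d(channels // reduction, channels, kernel_size=1)

    def forward(self, x):
        s = F.adaptive_avg_pool2d(x, 1)           # squeeze: (N, C, 1, 1)
        s = self.expand(F.relu(self.reduce(s)))   # excitation bottleneck
        return torch.sigmoid(s) * x               # channel-wise rescaling

se = SqueezeExcite(32)
y = se(torch.randn(2, 32, 16, 16))   # output keeps the input shape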
Example #16
Source File: wideresnet.py From fast-autoaugment with MIT License | 5 votes |
def forward(self, x):
    out = self.conv1(x)
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = F.relu(self.bn1(out))
    # out = F.avg_pool2d(out, 8)
    out = F.adaptive_avg_pool2d(out, (1, 1))
    out = out.view(out.size(0), -1)
    out = self.linear(out)
    return out
Example #17
Source File: pix2pix.py From torchsupport with MIT License | 5 votes |
def __getitem__(self, index):
    position = self.indices[index]
    img, _ = self.data[position]
    img = torch.tensor(np.array(img)).permute(2, 0, 1).to(torch.float) / 255
    edge = img[:, :, :256].unsqueeze(0)
    shoe = img[:, :, 256:].unsqueeze(0)
    edge = func.adaptive_max_pool2d(1 - edge, (28, 28))
    shoe = func.adaptive_avg_pool2d(shoe, (28, 28))
    return edge[0], shoe[0]
Example #18
Source File: resnest_models.py From openseg.pytorch with MIT License | 5 votes |
def forward(self, x):
    x = self.conv(x)
    if self.use_bn:
        x = self.bn0(x)
    if self.dropblock_prob > 0.0:
        x = self.dropblock(x)
    x = self.relu(x)

    batch, rchannel = x.shape[:2]
    if self.radix > 1:
        splited = torch.split(x, rchannel//self.radix, dim=1)
        gap = sum(splited)
    else:
        gap = x
    gap = F.adaptive_avg_pool2d(gap, 1)
    gap = self.fc1(gap)

    if self.use_bn:
        gap = self.bn1(gap)
    gap = self.relu(gap)

    atten = self.fc2(gap)
    atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

    if self.radix > 1:
        attens = torch.split(atten, rchannel//self.radix, dim=1)
        out = sum([att*split for (att, split) in zip(attens, splited)])
    else:
        out = atten * x
    return out.contiguous()
Example #19
Source File: resnext.py From pytorch_image_classification with MIT License | 5 votes |
def _forward_conv(self, x):
    x = F.relu(self.bn(self.conv(x)), inplace=True)
    x = self.stage1(x)
    x = self.stage2(x)
    x = self.stage3(x)
    x = F.adaptive_avg_pool2d(x, output_size=1)
    return x
Example #20
Source File: resnest_models.py From openseg.pytorch with MIT License | 5 votes |
def forward(self, inputs):
    return F.adaptive_avg_pool2d(inputs, 1).view(inputs.size(0), -1)
Example #21
Source File: cycle_gan.py From torchsupport with MIT License | 5 votes |
def forward(self, data):
    out = func.relu(self.preprocess(data))
    for block in self.blocks:
        out = func.relu(block(out))
        out = func.max_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(out.size(0), -1)
    return self.postprocess(out)
Example #22
Source File: cycle_gan.py From torchsupport with MIT License | 5 votes |
def __getitem__(self, index):
    position = self.indices[index]
    img, _ = self.data[position]
    img = torch.tensor(np.array(img)).permute(2, 0, 1).to(torch.float) / 255
    edge = img[:, :, :256].unsqueeze(0)
    shoe = img[:, :, 256:].unsqueeze(0)
    edge = func.adaptive_max_pool2d(1 - edge, (28, 28))
    shoe = func.adaptive_avg_pool2d(shoe, (28, 28))
    return edge[0], shoe[0]
Example #23
Source File: conditional_mnist_ebm.py From torchsupport with MIT License | 5 votes |
def forward(self, inputs, condition):
    out = self.preprocess(inputs)
    for bn, block in zip(self.bn, self.blocks):
        out = func.relu(bn(out + block(out)))
        out = func.avg_pool2d(out, 2)
    out = self.postprocess(out)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
    cond = self.condition(condition)
    result = self.combine(func.relu(out + cond))
    return result
Example #24
Source File: flowers_consistent_gan.py From torchsupport with MIT License | 5 votes |
def levels(self, data):
    upper = data
    middle = upper[:, 64:-64, 64:-64]
    lower = middle[:, 32:-32, 32:-32]
    upper = func.adaptive_avg_pool2d(upper.unsqueeze(0), 64)[0]
    middle = func.adaptive_avg_pool2d(middle.unsqueeze(0), 64)[0]
    return (upper, middle, lower)
Example #25
Source File: mnist_off_ebm.py From torchsupport with MIT License | 5 votes |
def forward(self, inputs):
    out = self.preprocess(inputs)
    for block in self.blocks:
        out = func.relu(out + block(out))
        out = func.avg_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 32)
    out = self.postprocess(out)
    return out
Example #26
Source File: flowers_consistent_gan.py From torchsupport with MIT License | 5 votes |
def forward(self, available_input, requested_input, available, requested):
    inputs = torch.cat((available_input, requested_input, available, requested), dim=1)
    out = self.preprocess(inputs)
    for idx, (block, bn) in enumerate(zip(self.encoder, self.encoder_norm)):
        out = func.elu(block(bn(out))) + out
        if (idx + 1) % 2 == 0:
            out = func.max_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(out.size(0), -1)
    result = self.decide(out)
    return result
Example #27
Source File: flowers_consistent_gan.py From torchsupport with MIT License | 5 votes |
def __call__(self, x):
    result = func.adaptive_avg_pool2d(x, (256, 256))
    return result
Example #28
Source File: coinrun.py From torchsupport with MIT License | 5 votes |
def forward(self, inputs, **kwargs):
    result = self.blocks(inputs)
    result = func.adaptive_avg_pool2d(result, 1).view(result.size(0), -1)
    result = self.postprocess(result)
    return result
Example #29
Source File: augmented_cycle_gan.py From torchsupport with MIT License | 5 votes |
def forward(self, data):
    out = func.relu(self.preprocess(data))
    for block in self.blocks:
        out = func.relu(block(out))
        out = func.max_pool2d(out, 2)
    out = func.adaptive_avg_pool2d(out, 1).view(out.size(0), -1)
    return self.postprocess(out)
Example #30
Source File: conditional_cifar_classifier.py From torchsupport with MIT License | 5 votes |
def forward(self, inputs, *args):
    out = self.preprocess(inputs)
    count = 0
    for bn, proj, block in zip(self.bn, self.project, self.blocks):
        out = bn(proj(out) + block(out))
        count += 1
        if count % 5 == 0:
            out = func.avg_pool2d(out, 2)
    out = self.postprocess(out)
    out = func.adaptive_avg_pool2d(out, 1).view(-1, 128)
    return self.predict(out)