Python torch.nn.functional.adaptive_max_pool2d() Examples

The following are 30 code examples of torch.nn.functional.adaptive_max_pool2d(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to browse the other available functions and classes of the torch.nn.functional module.
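Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what the function does: it max-pools an input of arbitrary spatial size down to a fixed output size, so the same code works for variable input resolutions.

import torch
import torch.nn.functional as F

# Illustrative tensor with odd spatial dimensions.
x = torch.randn(2, 64, 37, 53)              # (batch, channels, H, W)

y = F.adaptive_max_pool2d(x, 1)             # global max pool  -> (2, 64, 1, 1)
z = F.adaptive_max_pool2d(x, (7, 7))        # fixed 7x7 output -> (2, 64, 7, 7)

print(y.shape, z.shape)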
Example #1
Source File: model.py    From sgas with MIT License
def forward(self, x):
        logits_aux = None
        s0 = s1 = self.stem(x)
        pre_layers = [s1]
        for i, cell in enumerate(self.cells):
            s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
            pre_layers.append(s1)

            if i == 2 * self._layers // 3:
                if self._auxiliary and self.training:
                    logits_aux = self.auxiliary_head(s1).squeeze(-1).squeeze(-1)

        fusion = torch.cat(pre_layers, dim=1)
        fusion = self.fusion_conv(fusion)
        x1 = F.adaptive_max_pool2d(fusion, 1)
        x2 = F.adaptive_avg_pool2d(fusion, 1)
        logits = self.classifier(torch.cat((x1, x2), dim=1)).squeeze(-1).squeeze(-1)
        return logits, logits_aux 
Example #2
Source File: model_search.py    From sgas with MIT License
def forward(self, x):
        s0 = s1 = self.stem(x)
        pre_layers = [s1]
        for i, cell in enumerate(self.cells):
            weights = []
            n = 2
            start = 0
            for _ in range(self._steps):
                end = start + n
                for j in range(start, end):
                    weights.append(F.softmax(self.alphas_normal[j], dim=-1))
                start = end
                n += 1

            selected_idxs = self.normal_selected_idxs
            s0, s1 = s1, cell(s0, s1, weights, selected_idxs)
            pre_layers.append(s1)

        fusion = torch.cat(pre_layers, dim=1)
        fusion = self.fusion_conv(fusion)
        x1 = F.adaptive_max_pool2d(fusion, 1)
        x2 = F.adaptive_avg_pool2d(fusion, 1)
        logits = self.classifier(torch.cat((x1, x2), dim=1))
        return logits.squeeze(-1).squeeze(-1) 
Example #3
Source File: DenseNet.py    From hyperbolic-image-embeddings with MIT License
def forward(self, x, feature=False):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_max_pool2d(out, (1, 1)).view(features.size(0), -1)
        #         out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
        if self.classifier is None:
            if feature:
                return out, None
            else:
                return out
        if feature:
            out1 = self.classifier(out)
            return out, out1

        out = self.classifier(out)
        return out 
Example #4
Source File: attention.py    From torchsupport with MIT License
def forward(self, inputs):
    scaled_inputs = None
    if self.scale:
      scaled_inputs = func.max_pool2d(inputs, self.scale)
    elif self.size:
      scaled_inputs = func.adaptive_max_pool2d(inputs, self.size)
    else:
      scaled_inputs = inputs

    query = self.query(inputs).view(inputs.size(0), self.attention_size, -1)
    key = self.key(scaled_inputs).view(scaled_inputs.size(0), self.attention_size, -1)
    value = self.value(scaled_inputs).view(scaled_inputs.size(0), self.attention_size, -1)

    key = key.permute(0, 2, 1)
    assignment = (key @ query).softmax(dim=1)
    result = value @ assignment
    result = result.view(inputs.size(0), self.attention_size, *inputs.shape[2:])

    return self.project(result) + inputs 
Example #5
Source File: pspnet.py    From Single-Human-Parsing-LIP with MIT License
def forward(self, x):
        f, class_f = self.feats(x) 
        p = self.psp(f)
        p = self.drop_1(p)

        p = self.up_1(p)
        p = self.drop_2(p)

        p = self.up_2(p)
        p = self.drop_2(p)

        p = self.up_3(p)
        p = self.drop_2(p)

        auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))

        return self.final(p), self.classifier(auxiliary) 
Example #6
Source File: stereo_focal_loss.py    From DenseMatchingBenchmark with MIT License
def __init__(
            self, max_disp, start_disp=0,
            dilation=1, weights=None,
            focal_coefficient=0.0,
            sparse=False
    ):
        self.max_disp = max_disp
        self.start_disp = start_disp
        self.end_disp = self.max_disp + self.start_disp - 1
        self.dilation = dilation
        self.weights = weights
        self.focal_coefficient = focal_coefficient
        self.sparse = sparse
        if sparse:
            # sparse disparity ==> max_pooling
            self.scale_func = F.adaptive_max_pool2d
        else:
            # dense disparity ==> avg_pooling
            self.scale_func = F.adaptive_avg_pool2d 
Example #7
Source File: pooler.py    From easy-faster-rcnn.pytorch with MIT License
def apply(features: Tensor, proposal_bboxes: Tensor, proposal_batch_indices: Tensor, mode: Mode) -> Tensor:
        _, _, feature_map_height, feature_map_width = features.shape
        scale = 1 / 16
        output_size = (7 * 2, 7 * 2)

        if mode == Pooler.Mode.POOLING:
            pool = []
            for (proposal_bbox, proposal_batch_index) in zip(proposal_bboxes, proposal_batch_indices):
                start_x = max(min(round(proposal_bbox[0].item() * scale), feature_map_width - 1), 0)      # clamped to [0, feature_map_width)
                start_y = max(min(round(proposal_bbox[1].item() * scale), feature_map_height - 1), 0)     # clamped to [0, feature_map_height)
                end_x = max(min(round(proposal_bbox[2].item() * scale) + 1, feature_map_width), 1)        # clamped to (0, feature_map_width]
                end_y = max(min(round(proposal_bbox[3].item() * scale) + 1, feature_map_height), 1)       # clamped to (0, feature_map_height]
                roi_feature_map = features[proposal_batch_index, :, start_y:end_y, start_x:end_x]
                pool.append(F.adaptive_max_pool2d(input=roi_feature_map, output_size=output_size))
            pool = torch.stack(pool, dim=0)
        elif mode == Pooler.Mode.ALIGN:
            pool = ROIAlign(output_size, spatial_scale=scale, sampling_ratio=0)(
                features,
                torch.cat([proposal_batch_indices.view(-1, 1).float(), proposal_bboxes], dim=1)
            )
        else:
            raise ValueError

        pool = F.max_pool2d(input=pool, kernel_size=2, stride=2)
        return pool 
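As a hedged usage note for the pooling branch above (the shapes below are illustrative, not from the project): adaptive max pooling is what lets RoI crops of different sizes all come out at the same fixed size, so they can be stacked into one batch tensor.

import torch
import torch.nn.functional as F

# Illustrative only: two RoI crops with different spatial sizes both pool to 14x14.
crop_a = torch.randn(256, 23, 9)            # (C, H, W)
crop_b = torch.randn(256, 6, 31)
pooled = [F.adaptive_max_pool2d(c, output_size=(7 * 2, 7 * 2)) for c in (crop_a, crop_b)]
print(pooled[0].shape, pooled[1].shape)     # both torch.Size([256, 14, 14])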
Example #8
Source File: bfp.py    From FoveaBox with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #9
Source File: bfp.py    From AerialDetection with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #10
Source File: BN_Inception.py    From Deep_Metric with Apache License 2.0
def forward(self, x):
        x = self.features(x)
        x = F.adaptive_max_pool2d(x, output_size=1)
        x = x.view(x.size(0), -1)
        if self.dim == 0:
            return x
        x = self.classifier(x)
        return x 
Example #11
Source File: bfp.py    From CenterNet with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #12
Source File: bfp.py    From mmdetection with Apache License 2.0
def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #13
Source File: multiscaleloss.py    From FlowNetPytorch with MIT License
def sparse_max_pool(input, size):
    '''Downsample the input, treating 0 values as invalid.

    Unfortunately, no generic interpolation mode can resize a sparse map correctly.
    The strategy here is to use max pooling for the positive values and "min pooling"
    for the negative values; the two results are then summed.
    This minimizes the loss of sparse data points, unlike nearest interpolation,
    which could discard the information carried by isolated data points.'''

    positive = (input > 0).float()
    negative = (input < 0).float()
    output = F.adaptive_max_pool2d(input * positive, size) - F.adaptive_max_pool2d(-input * negative, size)
    return output 
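A small hedged check of the idea in the docstring (the values are made up): an isolated positive or negative measurement survives the downsampling, whereas plain average pooling would dilute it toward zero.

import torch
import torch.nn.functional as F

# Hypothetical sparse map: one valid measurement per 4x4 tile, zeros elsewhere.
sparse = torch.zeros(1, 1, 8, 8)
sparse[0, 0, 2, 3] = 5.0
sparse[0, 0, 6, 1] = -3.0

down = sparse_max_pool(sparse, (2, 2))
print(down)
# The +5.0 and -3.0 values are preserved in their cells; by contrast,
# F.adaptive_avg_pool2d(sparse, (2, 2)) would shrink them to 0.3125 and -0.1875.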
Example #14
Source File: mobilenet_utils.py    From Auto-PyTorch with Apache License 2.0
def select_adaptive_pool2d(x, pool_type='avg', output_size=1):
    """Selectable global pooling function with dynamic input kernel size
    """
    if pool_type == 'avg':
        x = F.adaptive_avg_pool2d(x, output_size)
    elif pool_type == 'avgmax':
        x = adaptive_avgmax_pool2d(x, output_size)
    elif pool_type == 'catavgmax':
        x = adaptive_catavgmax_pool2d(x, output_size)
    elif pool_type == 'max':
        x = F.adaptive_max_pool2d(x, output_size)
    else:
        assert False, 'Invalid pool type: %s' % pool_type
    return x 
Example #15
Source File: mobilenet_utils.py    From Auto-PyTorch with Apache License 2.0
def adaptive_catavgmax_pool2d(x, output_size=1):
    x_avg = F.adaptive_avg_pool2d(x, output_size)
    x_max = F.adaptive_max_pool2d(x, output_size)
    return torch.cat((x_avg, x_max), 1) 
Example #16
Source File: mobilenet_utils.py    From Auto-PyTorch with Apache License 2.0
def adaptive_avgmax_pool2d(x, output_size=1):
    x_avg = F.adaptive_avg_pool2d(x, output_size)
    x_max = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (x_avg + x_max) 
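A hedged shape check for the two combiners above (the tensor is illustrative): 'avgmax' averages the two poolings and keeps the channel count, while 'catavgmax' concatenates them and doubles it.

import torch

# Assumes the two helpers from the examples above (and their torch.nn.functional import) are in scope.
x = torch.randn(4, 128, 14, 14)
print(adaptive_avgmax_pool2d(x).shape)      # torch.Size([4, 128, 1, 1])
print(adaptive_catavgmax_pool2d(x).shape)   # torch.Size([4, 256, 1, 1])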
Example #17
Source File: bfp.py    From ttfnet with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #18
Source File: bfp.py    From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #19
Source File: bfp.py    From Libra_R-CNN with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #20
Source File: vision_net.py    From Sound-of-Pixels with MIT License
def forward(self, x, pool=True):
        x = self.features(x)
        x = self.fc(x)

        if not pool:
            return x

        if self.pool_type == 'avgpool':
            x = F.adaptive_avg_pool2d(x, 1)
        elif self.pool_type == 'maxpool':
            x = F.adaptive_max_pool2d(x, 1)

        x = x.view(x.size(0), x.size(1))
        return x 
Example #21
Source File: vision_net.py    From Sound-of-Pixels with MIT License
def forward(self, x, pool=True):
        x = self.features(x)
        x = self.fc(x)

        if not pool:
            return x

        if self.pool_type == 'avgpool':
            x = F.adaptive_avg_pool2d(x, 1)
        elif self.pool_type == 'maxpool':
            x = F.adaptive_max_pool2d(x, 1)

        x = x.view(x.size(0), x.size(1))
        return x 
Example #22
Source File: model_helper.py    From SSD_Pytorch with MIT License
def adaptive_pool(x, size):
    return F.adaptive_max_pool2d(x, size) 
Example #23
Source File: wrapper.py    From easy-fpn.pytorch with MIT License
def apply(features: Tensor, proposal_bboxes: Tensor, mode: Mode, image_width: int, image_height: int) -> Tensor:
        _, _, feature_map_height, feature_map_width = features.shape
        proposal_bboxes = proposal_bboxes.detach()

        scale_x = image_width / feature_map_width
        scale_y = image_height / feature_map_height

        if mode == Wrapper.Mode.POOLING:
            pool = []
            for proposal_bbox in proposal_bboxes:
                start_x = max(min(round(proposal_bbox[0].item() / scale_x), feature_map_width - 1), 0)      # clamped to [0, feature_map_width)
                start_y = max(min(round(proposal_bbox[1].item() / scale_y), feature_map_height - 1), 0)     # clamped to [0, feature_map_height)
                end_x = max(min(round(proposal_bbox[2].item() / scale_x) + 1, feature_map_width), 1)        # clamped to (0, feature_map_width]
                end_y = max(min(round(proposal_bbox[3].item() / scale_y) + 1, feature_map_height), 1)       # clamped to (0, feature_map_height]
                roi_feature_map = features[..., start_y:end_y, start_x:end_x]
                pool.append(F.adaptive_max_pool2d(input=roi_feature_map, output_size=7))
            pool = torch.cat(pool, dim=0)
        elif mode == Wrapper.Mode.ALIGN:
            x1 = proposal_bboxes[:, 0::4] / scale_x
            y1 = proposal_bboxes[:, 1::4] / scale_y
            x2 = proposal_bboxes[:, 2::4] / scale_x
            y2 = proposal_bboxes[:, 3::4] / scale_y

            crops = CropAndResizeFunction(crop_height=7 * 2, crop_width=7 * 2)(
                features,
                torch.cat([y1 / (feature_map_height - 1), x1 / (feature_map_width - 1),
                           y2 / (feature_map_height - 1), x2 / (feature_map_width - 1)],
                          dim=1),
                torch.zeros(proposal_bboxes.shape[0], dtype=torch.int, device=proposal_bboxes.device)
            )
            pool = F.max_pool2d(input=crops, kernel_size=2, stride=2)
        else:
            raise ValueError

        return pool 
Example #24
Source File: pspnet.py    From BraTs with MIT License
def forward(self, x):
        #f, class_f = self.feats(x)
        f, _ = self.feats(x)
        p = self.psp(f)
        p = self.up1(p)
        p = self.up2(p)
        p = self.up3(p)
        #auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).\
        #            view(-1, class_f.size(1))
        p = self.final(p)
        p = softmax(p, dim=1)
        return p#, self.classifier(auxiliary) 
Example #25
Source File: preact_resnet.py    From imagenet-fast with Apache License 2.0
def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.adaptive_max_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        return F.log_softmax(self.linear(out), dim=1)
Example #26
Source File: senet.py    From imagenet-fast with Apache License 2.0
def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.adaptive_max_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        out = F.log_softmax(self.linear(out), dim=1)
        return out 
Example #27
Source File: dpn.py    From imagenet-fast with Apache License 2.0
def logits(self, features):
        x = F.adaptive_max_pool2d(features, 1)
        x = x.view(x.size(0), -1)
        return self.classifier(x) 
Example #28
Source File: dpn.py    From imagenet-fast with Apache License 2.0
def logits(self, features):
        x = F.adaptive_max_pool2d(features, 1)
        x = x.view(x.size(0), -1)
        return self.classifier(x) 
Example #29
Source File: conf_nll_loss.py    From DenseMatchingBenchmark with MIT License
def __init__(self, max_disp, start_disp=0, weights=None, sparse=False):
        self.max_disp = max_disp
        self.start_disp = start_disp
        self.weights = weights
        self.sparse = sparse
        if sparse:
            # sparse disparity ==> max_pooling
            self.scale_func = F.adaptive_max_pool2d
        else:
            # dense disparity ==> avg_pooling
            self.scale_func = F.adaptive_avg_pool2d 
Example #30
Source File: decoder.py    From mlcomp with Apache License 2.0
def forward(self, x):
        x = F.adaptive_max_pool2d(x, output_size=(1, 1))
        x = x.view(-1, x.size(1))
        x = self.linear(x)
        return x