Python torch.nn.functional.max_pool2d() Examples
The following are 30 code examples of torch.nn.functional.max_pool2d(). You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the torch.nn.functional module, or try the search function.
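Before the project examples, here is a minimal, self-contained sketch of the basic call; the tensor shapes are arbitrary and chosen only for illustration. When stride is omitted it defaults to kernel_size, and a stride of 1 with padding=(kernel_size - 1) // 2 preserves the spatial size.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)                  # (batch, channels, height, width)
y = F.max_pool2d(x, kernel_size=2)             # stride defaults to kernel_size
print(y.shape)                                 # torch.Size([1, 3, 16, 16])
y = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
print(y.shape)                                 # torch.Size([1, 3, 32, 32]), size preserved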
Example #1
Source File: attention.py From nsf with MIT License | 6 votes |
def forward(self, inputs, y=None):
    # Apply convs
    theta = self.theta(inputs)
    phi = F.max_pool2d(self.phi(inputs), [2, 2])
    g = F.max_pool2d(self.g(inputs), [2, 2])
    # Perform reshapes
    theta = theta.view(-1, self.channels // self.heads, inputs.shape[2] * inputs.shape[3])
    phi = phi.view(-1, self.channels // self.heads, inputs.shape[2] * inputs.shape[3] // 4)
    g = g.view(-1, self.channels // 2, inputs.shape[2] * inputs.shape[3] // 4)
    # Matmul and softmax to get attention maps
    beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
    # Attention map times g path
    o = self.o(torch.bmm(g, beta.transpose(1, 2)).view(-1, self.channels // 2, inputs.shape[2], inputs.shape[3]))
    outputs = self.gamma * o + inputs
    return outputs
Example #2
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = self.bnm1(out)
    out = F.relu(self.conv2(out))
    out = self.bnm2(out)
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv3(out))
    out = self.bnm3(out)
    out = F.relu(self.conv4(out))
    out = self.bnm4(out)
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    #out = self.dropout1(out)
    out = F.relu(self.fc1(out))
    #out = self.dropout2(out)
    out = self.bnm5(out)
    out = F.relu(self.fc2(out))
    #out = self.dropout3(out)
    out = self.bnm6(out)
    out = self.fc3(out)
    return out
Example #3
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv3(out))
    out = F.relu(self.conv4(out))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv5(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = self.dropout1(out)
    out = F.relu(self.fc1(out))
    out = self.dropout2(out)
    out = F.relu(self.fc2(out))
    out = self.dropout3(out)
    out = self.fc3(out)
    return out
Example #4
Source File: pooler.py From easy-faster-rcnn.pytorch with MIT License | 6 votes |
def apply(features: Tensor, proposal_bboxes: Tensor, proposal_batch_indices: Tensor, mode: Mode) -> Tensor:
    _, _, feature_map_height, feature_map_width = features.shape
    scale = 1 / 16
    output_size = (7 * 2, 7 * 2)

    if mode == Pooler.Mode.POOLING:
        pool = []
        for (proposal_bbox, proposal_batch_index) in zip(proposal_bboxes, proposal_batch_indices):
            start_x = max(min(round(proposal_bbox[0].item() * scale), feature_map_width - 1), 0)   # [0, feature_map_width)
            start_y = max(min(round(proposal_bbox[1].item() * scale), feature_map_height - 1), 0)  # (0, feature_map_height]
            end_x = max(min(round(proposal_bbox[2].item() * scale) + 1, feature_map_width), 1)     # [0, feature_map_width)
            end_y = max(min(round(proposal_bbox[3].item() * scale) + 1, feature_map_height), 1)    # (0, feature_map_height]
            roi_feature_map = features[proposal_batch_index, :, start_y:end_y, start_x:end_x]
            pool.append(F.adaptive_max_pool2d(input=roi_feature_map, output_size=output_size))
        pool = torch.stack(pool, dim=0)
    elif mode == Pooler.Mode.ALIGN:
        pool = ROIAlign(output_size, spatial_scale=scale, sampling_ratio=0)(
            features,
            torch.cat([proposal_batch_indices.view(-1, 1).float(), proposal_bboxes], dim=1)
        )
    else:
        raise ValueError

    pool = F.max_pool2d(input=pool, kernel_size=2, stride=2)
    return pool
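The POOLING branch above relies on F.adaptive_max_pool2d to map an RoI crop of arbitrary size onto a fixed grid before the final 2x2 max pool. A standalone sketch of that step, with a hypothetical crop size:

import torch
import torch.nn.functional as F

# Hypothetical RoI crop: 256 channels, 23x37 spatial extent.
roi = torch.randn(256, 23, 37)
pooled = F.adaptive_max_pool2d(roi, output_size=(14, 14))
print(pooled.shape)  # torch.Size([256, 14, 14])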
Example #5
Source File: resnet_face.py From emotion_classification with MIT License | 6 votes |
def forward(self, x):
    x = F.relu(self.bn1_a(self.conv1_a(x)))
    x_pool1b = F.max_pool2d(F.relu(self.bn1_b(self.conv1_b(x))), 2, stride=2)
    x = self.layer1(x_pool1b)
    x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), 2, stride=2)
    x = self.layer2(x)
    x_pool3 = F.max_pool2d(F.relu(self.bn3(self.conv3(x))), 2, stride=2)
    x = self.layer3(x_pool3)
    x = F.max_pool2d(F.relu(self.bn4(self.conv4(x))), 2, stride=2)
    x = self.layer4(x)
    x = x.view(-1, self.num_flat_features(x))
    x = self.fc5_new(x)
    # x1 = x1.view(1,-1,512)
    # x1, hn1 = self.lstm1(x1, (self.h1, self.c1))
    x = self.fc8_final(x)
    return x
Example #6
Source File: test_merge_cells.py From mmdetection with Apache License 2.0 | 6 votes |
def test_resize_methods():
    inputs_x = torch.randn([2, 256, 128, 128])
    target_resize_sizes = [(128, 128), (256, 256)]
    resize_methods_list = ['nearest', 'bilinear']

    for method in resize_methods_list:
        merge_cell = BaseMergeCell(upsample_mode=method)
        for target_size in target_resize_sizes:
            merge_cell_out = merge_cell._resize(inputs_x, target_size)
            gt_out = F.interpolate(inputs_x, size=target_size, mode=method)
            assert merge_cell_out.equal(gt_out)

    target_size = (64, 64)  # resize to a smaller size
    merge_cell = BaseMergeCell()
    merge_cell_out = merge_cell._resize(inputs_x, target_size)
    kernel_size = inputs_x.shape[-1] // target_size[-1]
    gt_out = F.max_pool2d(
        inputs_x, kernel_size=kernel_size, stride=kernel_size)
    assert (merge_cell_out == gt_out).all()
Example #7
Source File: FastNeuralTransfer.py From Deep-learning-with-cats with GNU General Public License v3.0 | 6 votes |
def forward(self, X):
    h = F.relu(self.conv1_1(X))
    h = F.relu(self.conv1_2(h))
    relu1_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)

    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    relu2_2 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)

    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    relu3_3 = h
    h = F.max_pool2d(h, kernel_size=2, stride=2)

    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    relu4_3 = h
    return [relu1_2, relu2_2, relu3_3, relu4_3]

## Weights init function
Example #8
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv3(out))
    out = F.relu(self.conv4(out))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv5(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = self.dropout1(out)
    out = F.relu(self.fc1(out))
    out = self.dropout2(out)
    out = F.relu(self.fc2(out))
    out = self.dropout3(out)
    out = self.fc3(out)
    return out
Example #9
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = self.bnm1(out)
    out = F.relu(self.conv2(out))
    out = self.bnm2(out)
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv3(out))
    out = self.bnm3(out)
    out = F.relu(self.conv4(out))
    out = self.bnm4(out)
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    #out = self.dropout1(out)
    out = F.relu(self.fc1(out))
    #out = self.dropout2(out)
    out = self.bnm5(out)
    out = F.relu(self.fc2(out))
    #out = self.dropout3(out)
    out = self.bnm6(out)
    out = self.fc3(out)
    return out
Example #10
Source File: decode_multi.py From posenet-pytorch with Apache License 2.0 | 5 votes |
def build_part_with_score_torch(score_threshold, local_max_radius, scores):
    lmd = 2 * local_max_radius + 1
    max_vals = F.max_pool2d(scores, lmd, stride=1, padding=1)
    max_loc = (scores == max_vals) & (scores >= score_threshold)
    max_loc_idx = max_loc.nonzero()
    scores_vec = scores[max_loc]
    sort_idx = torch.argsort(scores_vec, descending=True)
    return scores_vec[sort_idx], max_loc_idx[sort_idx]


# FIXME leaving here as reference for now
# def build_part_with_score_fast(score_threshold, local_max_radius, scores):
#     parts = []
#     num_keypoints = scores.shape[0]
#     lmd = 2 * local_max_radius + 1
#
#     # NOTE it seems faster to iterate over the keypoints and perform maximum_filter
#     # on each subarray vs doing the op on the full score array with size=(lmd, lmd, 1)
#     for keypoint_id in range(num_keypoints):
#         kp_scores = scores[keypoint_id, :, :].copy()
#         kp_scores[kp_scores < score_threshold] = 0.
#         max_vals = ndi.maximum_filter(kp_scores, size=lmd, mode='constant')
#         max_loc = np.logical_and(kp_scores == max_vals, kp_scores > 0)
#         max_loc_idx = max_loc.nonzero()
#         for y, x in zip(*max_loc_idx):
#             parts.append((
#                 scores[keypoint_id, y, x],
#                 keypoint_id,
#                 np.array((y, x))
#             ))
#
#     return parts
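The torch variant above exploits the fact that a stride-1 max pool leaves a score unchanged exactly at its local maxima, so comparing the pooled map with the original picks out keypoint peaks. A minimal sketch of that idea with hypothetical shapes and an assumed threshold:

import torch
import torch.nn.functional as F

scores = torch.rand(1, 3, 16, 16)              # hypothetical (1, keypoints, H, W) heatmaps
radius = 1
k = 2 * radius + 1                             # pooling window diameter
pooled = F.max_pool2d(scores, k, stride=1, padding=radius)
peaks = (scores == pooled) & (scores >= 0.9)   # local maxima above a threshold
print(peaks.nonzero().shape)                   # (num_peaks, 4) index tuples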
Example #11
Source File: dropblock.py From MobileNetV3-pytorch with MIT License | 5 votes |
def _compute_block_mask(self, mask):
    block_mask = F.max_pool2d(input=mask[:, None, :, :],
                              kernel_size=(self.block_size, self.block_size),
                              stride=(1, 1),
                              padding=self.block_size // 2)

    if self.block_size % 2 == 0:
        block_mask = block_mask[:, :, :-1, :-1]
    keeped = block_mask.numel() - block_mask.sum().to(torch.float32)  # prevent overflow in float16
    block_mask = 1 - block_mask.squeeze(1)

    return block_mask, keeped
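The DropBlock mask above is built the same way in miniature: max pooling a sparse 0/1 mask with stride 1 dilates each sampled centre into a block_size x block_size square. A tiny sketch with an assumed 8x8 mask and a single centre:

import torch
import torch.nn.functional as F

mask = torch.zeros(1, 1, 8, 8)
mask[0, 0, 3, 5] = 1.                    # one block centre
block_size = 3
block_mask = F.max_pool2d(mask, kernel_size=block_size, stride=1,
                          padding=block_size // 2)
print(int(block_mask.sum()))             # 9, i.e. a 3x3 block of ones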
Example #12
Source File: res2net.py From Res2Net-maskrcnn with MIT License | 5 votes |
def forward(self, x):
    x = self.conv1(x)
    x = self.bn1(x)
    x = F.relu_(x)
    x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
    return x
Example #13
Source File: lenet.py From Cuff_less_BP_Prediction with MIT License | 5 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    out = self.fc3(out)
    return out
Example #14
Source File: lenet.py From mixup_pytorch with MIT License | 5 votes |
def forward(self, x):
    out = F.relu(self.conv1(x))
    out = F.max_pool2d(out, 2)
    out = F.relu(self.conv2(out))
    out = F.max_pool2d(out, 2)
    out = out.view(out.size(0), -1)
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    out = self.fc3(out)
    return out
Example #15
Source File: darknet.py From pytorch-0.4-yolov3 with MIT License | 5 votes |
def forward(self, x):
    x = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
    return x
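This layer pads the right and bottom edges by one pixel so that a kernel-2, stride-1 max pool keeps the spatial size, a trick used in tiny-YOLO style networks. A quick shape check under an assumed 13x13 input:

import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 13, 13)
y = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
print(y.shape)  # torch.Size([1, 64, 13, 13]), unchanged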
Example #16
Source File: shufflenetv2.py From Cuff_less_BP_Prediction with MIT License | 5 votes |
def forward(self, x):
    out = F.relu(self.bn1(self.conv1(x)))
    # out = F.max_pool2d(out, 3, stride=2, padding=1)
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = F.relu(self.bn2(self.conv2(out)))
    out = F.avg_pool2d(out, 4)
    out = out.view(out.size(0), -1)
    out = self.linear(out)
    return out
Example #17
Source File: simple_mnist_example.py From hidden-networks with Apache License 2.0 | 5 votes |
def forward(self, x):
    x = self.conv1(x)
    x = F.relu(x)
    x = self.conv2(x)
    x = F.max_pool2d(x, 2)
    x = self.dropout1(x)
    x = torch.flatten(x, 1)
    x = self.fc1(x)
    x = F.relu(x)
    x = self.dropout2(x)
    x = self.fc2(x)
    output = F.log_softmax(x, dim=1)
    return output
Example #18
Source File: network_blocks.py From ASFF with GNU General Public License v3.0 | 5 votes |
def forward(self, x):
    x_1 = x
    x_2 = F.max_pool2d(x, 5, stride=1, padding=2)
    x_3 = F.max_pool2d(x, 9, stride=1, padding=4)
    x_4 = F.max_pool2d(x, 13, stride=1, padding=6)
    out = torch.cat((x_1, x_2, x_3, x_4), dim=1)
    return out
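This is a spatial-pyramid-pooling style block: every branch uses stride 1 with padding (kernel_size - 1) // 2, so all outputs keep the input resolution and can be concatenated along the channel dimension. A standalone shape check with an assumed input size:

import torch
import torch.nn.functional as F

x = torch.randn(1, 512, 19, 19)
branches = [x] + [F.max_pool2d(x, k, stride=1, padding=k // 2) for k in (5, 9, 13)]
out = torch.cat(branches, dim=1)
print(out.shape)  # torch.Size([1, 2048, 19, 19])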
Example #19
Source File: hrfpn.py From AerialDetection with Apache License 2.0 | 5 votes |
def __init__(self,
             in_channels,
             out_channels,
             num_outs=5,
             pooling_type='AVG',
             conv_cfg=None,
             norm_cfg=None,
             with_cp=False):
    super(HRFPN, self).__init__()
    assert isinstance(in_channels, list)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.num_ins = len(in_channels)
    self.num_outs = num_outs
    self.with_cp = with_cp
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg

    self.reduction_conv = ConvModule(
        sum(in_channels),
        out_channels,
        kernel_size=1,
        conv_cfg=self.conv_cfg,
        activation=None)

    self.fpn_convs = nn.ModuleList()
    for i in range(self.num_outs):
        self.fpn_convs.append(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                conv_cfg=self.conv_cfg,
                activation=None))

    if pooling_type == 'MAX':
        self.pooling = F.max_pool2d
    else:
        self.pooling = F.avg_pool2d
Example #20
Source File: fpn.py From AerialDetection with Apache License 2.0 | 5 votes |
def forward(self, inputs):
    assert len(inputs) == len(self.in_channels)

    # build laterals
    laterals = [
        lateral_conv(inputs[i + self.start_level])
        for i, lateral_conv in enumerate(self.lateral_convs)
    ]

    # build top-down path
    used_backbone_levels = len(laterals)
    for i in range(used_backbone_levels - 1, 0, -1):
        laterals[i - 1] += F.interpolate(
            laterals[i], scale_factor=2, mode='nearest')

    # build outputs
    # part 1: from original levels
    outs = [
        self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
    ]
    # part 2: add extra levels
    if self.num_outs > len(outs):
        # use max pool to get more levels on top of outputs
        # (e.g., Faster R-CNN, Mask R-CNN)
        if not self.add_extra_convs:
            for i in range(self.num_outs - used_backbone_levels):
                outs.append(F.max_pool2d(outs[-1], 1, stride=2))
        # add conv layers on top of original feature maps (RetinaNet)
        else:
            if self.extra_convs_on_inputs:
                orig = inputs[self.backbone_end_level - 1]
                outs.append(self.fpn_convs[used_backbone_levels](orig))
            else:
                outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
            for i in range(used_backbone_levels + 1, self.num_outs):
                if self.relu_before_extra_convs:
                    outs.append(self.fpn_convs[i](F.relu(outs[-1])))
                else:
                    outs.append(self.fpn_convs[i](outs[-1]))
    return tuple(outs)
Example #21
Source File: HRFPN.py From Parsing-R-CNN with MIT License | 5 votes |
def __init__(self, dim_in, spatial_scale):
    super().__init__()
    self.dim_in = sum(dim_in)
    self.spatial_scale = spatial_scale

    hrfpn_dim = cfg.FPN.HRFPN.DIM  # 256
    use_lite = cfg.FPN.HRFPN.USE_LITE
    use_bn = cfg.FPN.HRFPN.USE_BN
    use_gn = cfg.FPN.HRFPN.USE_GN
    if cfg.FPN.HRFPN.POOLING_TYPE == 'AVG':
        self.pooling = F.avg_pool2d
    else:
        self.pooling = F.max_pool2d
    self.num_extra_pooling = cfg.FPN.HRFPN.NUM_EXTRA_POOLING  # 1
    self.num_output = len(dim_in) + self.num_extra_pooling  # 5

    self.reduction_conv = make_conv(self.dim_in, hrfpn_dim, kernel=1, use_bn=use_bn, use_gn=use_gn)
    self.dim_in = hrfpn_dim
    self.fpn_conv = nn.ModuleList()
    for i in range(self.num_output):
        self.fpn_conv.append(
            make_conv(self.dim_in, hrfpn_dim, kernel=3, use_dwconv=use_lite, use_bn=use_bn, use_gn=use_gn,
                      suffix_1x1=use_lite)
        )
        self.dim_in = hrfpn_dim

    if self.num_extra_pooling:
        self.spatial_scale.append(self.spatial_scale[-1] * 0.5)
    self.dim_out = [self.dim_in for _ in range(self.num_output)]
    self._init_weights()
Example #22
Source File: resnet.py From Res2Net-maskrcnn with MIT License | 5 votes |
def forward(self, x):
    x = self.conv1(x)
    x = self.bn1(x)
    x = F.relu_(x)
    x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
    return x
Example #23
Source File: fpn.py From Res2Net-maskrcnn with MIT License | 5 votes |
def forward(self, x):
    return [F.max_pool2d(x, 1, 2, 0)]
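With kernel_size=1, stride=2 and no padding, max_pool2d degenerates into plain subsampling: it simply keeps every second row and column. A tiny sketch:

import torch
import torch.nn.functional as F

x = torch.arange(16.).view(1, 1, 4, 4)
y = F.max_pool2d(x, 1, 2, 0)             # kernel 1, stride 2, padding 0
print(y.flatten().tolist())              # [0.0, 2.0, 8.0, 10.0]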
Example #24
Source File: model.py From iAI with MIT License | 5 votes |
def forward(self, x):
    x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
    x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
    x = x.view(-1, 800)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
Example #25
Source File: model.py From iAI with MIT License | 5 votes |
def forward(self, x):
    x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
    x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
    x = x.view(-1, 800)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
Example #26
Source File: model.py From iAI with MIT License | 5 votes |
def forward(self, x):
    x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
    x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
    x = x.view(-1, 800)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
Example #27
Source File: mnist.py From iAI with MIT License | 5 votes |
def forward(self, x):
    x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)
    x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)
    x = x.view(-1, 800)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
Example #28
Source File: inception.py From fast-MPN-COV with MIT License | 5 votes |
def forward(self, x):
    branch3x3 = self.branch3x3_1(x)
    branch3x3 = self.branch3x3_2(branch3x3)

    branch7x7x3 = self.branch7x7x3_1(x)
    branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
    branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

    branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

    outputs = [branch3x3, branch7x7x3, branch_pool]
    return torch.cat(outputs, 1)
Example #29
Source File: network.py From MomentumContrast.pytorch with MIT License | 5 votes |
def forward(self, x):
    x = self.conv1(x)
    x = F.relu(x)
    x = self.conv2(x)
    x = F.max_pool2d(x, 2)
    x = torch.flatten(x, 1)
    x = self.fc1(x)
    x = F.normalize(x)
    return x
Example #30
Source File: Hourglass2015.py From Pytorch-Networks with MIT License | 5 votes |
def _hour_glass_forward(self, n, x):
    up1 = self.hg[n - 1][0](x)
    low1 = F.max_pool2d(x, 2, stride=2)
    low1 = self.hg[n - 1][1](low1)

    if n > 1:
        low2 = self._hour_glass_forward(n - 1, low1)
    else:
        low2 = self.hg[n - 1][3](low1)
    low3 = self.hg[n - 1][2](low2)
    up2 = self.upsample(low3)
    out = up1 + up2
    return out
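In the hourglass block, the stride-2 max pool halves the resolution on the way down and self.upsample restores it on the way back up, so up1 + up2 is shape-compatible. A shape-only sketch of that symmetry (the nearest-neighbour interpolation here is a stand-in, not necessarily the upsample module used in the original network):

import torch
import torch.nn.functional as F

x = torch.randn(1, 256, 64, 64)
low = F.max_pool2d(x, 2, stride=2)                       # 64x64 -> 32x32
up = F.interpolate(low, scale_factor=2, mode='nearest')  # 32x32 -> 64x64
print(low.shape, up.shape)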