Python torch.nn.AdaptiveMaxPool2d() Examples
The following are code examples of torch.nn.AdaptiveMaxPool2d(), drawn from open source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the torch.nn module.
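Before the examples, a minimal demonstration of what the module does: AdaptiveMaxPool2d fixes the output size and derives the pooling window from the input, so feature maps of different spatial sizes are reduced to the same shape. The tensor sizes below are illustrative.

import torch
import torch.nn as nn

# The output size is fixed; kernel and stride are derived per input.
pool = nn.AdaptiveMaxPool2d((5, 7))
print(pool(torch.randn(1, 64, 8, 9)).shape)    # torch.Size([1, 64, 5, 7])
print(pool(torch.randn(1, 64, 16, 21)).shape)  # torch.Size([1, 64, 5, 7])

# A single int pools both dimensions; output_size=1 is global max pooling.
gmp = nn.AdaptiveMaxPool2d(1)
print(gmp(torch.randn(1, 64, 32, 32)).shape)   # torch.Size([1, 64, 1, 1])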
Example #1
Source File: compute_memory.py From TreeFilter-Torch with MIT License
def compute_memory(module, inp, out):
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
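The per-layer helpers such as compute_Pool2d_memory are defined elsewhere in the project. A minimal sketch of what a pooling-memory helper might look like, assuming memory is counted as (parameter memory, activation memory); this is a hypothetical reconstruction, not the TreeFilter-Torch implementation:

def compute_Pool2d_memory(module, inp, out):
    # Hypothetical sketch: pooling layers have no parameters,
    # so only the output activations contribute.
    assert isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                               nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d))
    return 0, out.numel()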
Example #2
Source File: compute_flops.py From TreeFilter-Torch with MIT License
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out), 'Conv2d'
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out), 'BatchNorm2d'
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_flops(module, inp, out), 'Pool2d'
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU,
                             nn.LeakyReLU, nn.Sigmoid)):
        return compute_ReLU_flops(module, inp, out), 'Activation'
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out), 'Upsample'
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out), 'Linear'
    else:
        print("[Flops]: {} is not supported!".format(type(module).__name__))
        return 0, -1
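compute_Pool2d_flops is likewise defined elsewhere in the file. A hedged sketch, assuming the common convention of counting one operation per input element (each element is visited once by its pooling window); not the project's actual implementation:

def compute_Pool2d_flops(module, inp, out):
    # Hypothetical sketch: one comparison/add per input element.
    assert isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                               nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d))
    return inp.numel()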
Example #3
Source File: attention_best.py From Compact-Global-Descriptor with BSD 2-Clause "Simplified" License
def __init__(self, in_channels, out_channels, bias=True, nonlinear=True):
    super(AttentionLayer, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    self.softmax = nn.Softmax(dim=1)
    self.w0 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.w1 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.w2 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.bias0 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    self.bias1 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    self.bias2 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    nn.init.xavier_uniform_(self.w0)
    nn.init.xavier_uniform_(self.w1)
    nn.init.xavier_uniform_(self.w2)
    # self.tanh = nn.Tanh()
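The module's forward pass is not shown; what is visible is that both pooling branches collapse an (N, C, H, W) feature map to (N, C, 1, 1) channel descriptors for the learned weights to mix. A usage sketch with hypothetical shapes:

layer = AttentionLayer(in_channels=64, out_channels=64)
x = torch.randn(2, 64, 16, 16)
avg_desc = layer.avg_pool(x)  # (2, 64, 1, 1) per-channel averages
max_desc = layer.max_pool(x)  # (2, 64, 1, 1) per-channel maxima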
Example #4
Source File: flops_counter.py From NAS-Benchmark with GNU General Public License v3.0
def add_flops_counter_hook_function(module):
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            return
        if isinstance(module, torch.nn.Conv2d):
            handle = module.register_forward_hook(conv_flops_counter_hook)
        elif isinstance(module, (torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
                                 torch.nn.LeakyReLU, torch.nn.ReLU6)):
            handle = module.register_forward_hook(relu_flops_counter_hook)
        elif isinstance(module, torch.nn.Linear):
            handle = module.register_forward_hook(linear_flops_counter_hook)
        elif isinstance(module, (torch.nn.AvgPool2d, torch.nn.MaxPool2d,
                                 nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d)):
            handle = module.register_forward_hook(pool_flops_counter_hook)
        elif isinstance(module, torch.nn.BatchNorm2d):
            handle = module.register_forward_hook(bn_flops_counter_hook)
        elif isinstance(module, torch.nn.Upsample):
            handle = module.register_forward_hook(upsample_flops_counter_hook)
        else:
            handle = module.register_forward_hook(empty_flops_counter_hook)
        module.__flops_handle__ = handle
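The hook functions themselves are defined elsewhere in the file. A minimal sketch of what a pooling hook might do, assuming FLOPs are accumulated on the module as one operation per input element; a hypothetical reconstruction, not the project's code:

def pool_flops_counter_hook(module, input, output):
    # Forward hooks receive (module, input_tuple, output); a per-module
    # counter set up during registration can be incremented here.
    module.__flops__ += int(input[0].numel())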
Example #5
Source File: CNN.py From nn_builder with MIT License
def create_and_append_layer(self, input_dim, layer, list_to_append_layer_to):
    """Creates and appends a layer to the list provided"""
    layer_name = layer[0].lower()
    assert layer_name in self.valid_cnn_hidden_layer_types, "Layer name {} not valid, use one of {}".format(
        layer_name, self.valid_cnn_hidden_layer_types)
    if layer_name == "conv":
        list_to_append_layer_to.extend([nn.Conv2d(in_channels=input_dim[0], out_channels=layer[1],
                                                  kernel_size=layer[2], stride=layer[3], padding=layer[4])])
    elif layer_name == "maxpool":
        list_to_append_layer_to.extend([nn.MaxPool2d(kernel_size=layer[1], stride=layer[2], padding=layer[3])])
    elif layer_name == "avgpool":
        list_to_append_layer_to.extend([nn.AvgPool2d(kernel_size=layer[1], stride=layer[2], padding=layer[3])])
    elif layer_name == "adaptivemaxpool":
        list_to_append_layer_to.extend([nn.AdaptiveMaxPool2d(output_size=(layer[1], layer[2]))])
    elif layer_name == "adaptiveavgpool":
        list_to_append_layer_to.extend([nn.AdaptiveAvgPool2d(output_size=(layer[1], layer[2]))])
    elif layer_name == "linear":
        if isinstance(input_dim, tuple):
            input_dim = np.prod(np.array(input_dim))
        list_to_append_layer_to.extend([nn.Linear(in_features=input_dim, out_features=layer[1])])
    else:
        raise ValueError("Wrong layer name")
    input_dim = self.calculate_new_dimensions(input_dim, layer)
    return input_dim
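A usage sketch of the layer-spec format this builder parses (the values are hypothetical): the first element of each spec names the layer type, and the remaining elements are positional hyperparameters in the order the branches above read them.

layers = [
    ["conv", 32, 3, 1, 1],      # out_channels, kernel_size, stride, padding
    ["maxpool", 2, 2, 0],       # kernel_size, stride, padding
    ["adaptivemaxpool", 4, 4],  # fixed 4x4 output, whatever the input size
    ["linear", 10],             # out_features
]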
Example #6
Source File: architectures.py From affnet with MIT License
def __init__(self, PS=28):
    super(YiNet, self).__init__()
    self.features = nn.Sequential(
        nn.Conv2d(1, 10, kernel_size=5, padding=0, bias=True),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(10, 20, kernel_size=5, stride=1, padding=0, bias=True),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=4, stride=2, padding=2),
        nn.Conv2d(20, 50, kernel_size=3, stride=1, padding=0, bias=True),
        nn.ReLU(),
        nn.AdaptiveMaxPool2d(1),
        GHH(50, 100),
        GHH(100, 2)
    )
    self.input_mean = 0.427117081207483
    self.input_std = 0.21888339179665006
    self.PS = PS
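Because the nn.AdaptiveMaxPool2d(1) stage collapses whatever spatial size reaches it to 1x1, the descriptor shape is independent of the patch size. A quick check, assuming YiNet and GHH from this file are importable and that nn.Sequential slicing is available (the patch sizes are hypothetical):

net = YiNet(PS=28)
trunk = net.features[:9]  # conv/pool stack up to nn.AdaptiveMaxPool2d(1)
for s in (28, 32, 40):
    print(trunk(torch.randn(1, 1, s, s)).shape)  # torch.Size([1, 50, 1, 1]) each time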
Example #7
Source File: models.py From adversarial-object-removal with MIT License
def __init__(self, image_size=128, c_dim=5, net_type='vgg19', max_filters=None,
             global_pool='mean', use_bias=False, class_ftune=0):
    super(DiscriminatorGAP_ImageNet, self).__init__()
    layers = []
    nFilt = 512 if max_filters is None else max_filters
    self.pnet = Vgg19(only_last=True) if net_type == 'vgg19' else None
    if class_ftune > 0.:
        pAll = list(self.pnet.named_parameters())
        # Multiply by two for weight and bias
        for pn in pAll[::-1][:2 * class_ftune]:
            pn[1].requires_grad = True
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(512, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(nFilt, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    self.layers = nn.Sequential(*layers)
    self.globalPool = nn.AdaptiveAvgPool2d(1) if global_pool == 'mean' else nn.AdaptiveMaxPool2d(1)
    self.classifyFC = nn.Linear(nFilt, c_dim, bias=use_bias)
    self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.c_dim = c_dim
Example #8
Source File: models.py From adversarial-object-removal with MIT License
def __init__(self, image_size=128, c_dim=5, net_type='vgg19', max_filters=None,
             global_pool='mean', topk=3, mink=3, use_bias=False):
    super(DiscriminatorGAP_ImageNet_Weldon, self).__init__()
    layers = []
    self.topk = topk
    self.mink = mink
    nFilt = 512 if max_filters is None else max_filters
    self.pnet = Vgg19(only_last=True) if net_type == 'vgg19' else None
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(512, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(nFilt, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    self.layers = nn.Sequential(*layers)
    # self.AggrConv = nn.conv2d(nFilt, c_dim, kernel_size=1, stride=1, bias=False)
    self.classifyConv = nn.Conv2d(nFilt, c_dim, kernel_size=1, stride=1, bias=use_bias)
    self.globalPool = nn.AdaptiveAvgPool2d(1) if global_pool == 'mean' else nn.AdaptiveMaxPool2d(1)
    self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.c_dim = c_dim
Example #9
Source File: branches.py From ABD-Net with MIT License
def __init__(self, owner, backbone, args, input_dim, part_num=None):
    super().__init__()
    self.owner = weakref.ref(owner)
    self.input_dim = input_dim
    self.output_dim = args['np_dim']
    self.args = args
    self.num_classes = owner.num_classes
    self.with_global = args['np_with_global']
    if part_num is None:
        part_num = args['np_np']
    self.part_num = subbranch_num = part_num
    if self.with_global:
        subbranch_num += 1
    self.fcs = nn.ModuleList([self._init_fc_layer() for i in range(subbranch_num)])
    if args['np_max_pooling']:
        self.avgpool = nn.AdaptiveMaxPool2d(1)
    else:
        self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.classifiers = nn.ModuleList([self._init_classifier() for i in range(subbranch_num)])
Example #10
Source File: branches.py From ABD-Net with MIT License
def __init__(self, owner, backbone, args, input_dim):
    super().__init__()
    self.owner = weakref.ref(owner)
    self.input_dim = input_dim
    self.output_dim = args['global_dim']
    self.args = args
    self.num_classes = owner.num_classes
    self._init_fc_layer()
    if args['global_max_pooling']:
        self.avgpool = nn.AdaptiveMaxPool2d(1)
    else:
        self.avgpool = nn.AdaptiveAvgPool2d(1)
    self._init_classifier()
Example #11
Source File: test_model.py From EfficientNet-PyTorch with Apache License 2.0
def test_modify_pool(net, img_size):
    """Test ability to modify pooling module of network"""

    class AdaptiveMaxAvgPool(nn.Module):

        def __init__(self):
            super().__init__()
            self.ada_avgpool = nn.AdaptiveAvgPool2d(1)
            self.ada_maxpool = nn.AdaptiveMaxPool2d(1)

        def forward(self, x):
            avg_x = self.ada_avgpool(x)
            max_x = self.ada_maxpool(x)
            x = torch.cat((avg_x, max_x), dim=1)
            return x

    avg_pooling = AdaptiveMaxAvgPool()
    fc = nn.Linear(net._fc.in_features * 2, net._global_params.num_classes)
    net._avg_pooling = avg_pooling
    net._fc = fc
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
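A quick shape check of the concat-pool head (assuming the class were lifted to module scope; the 1280 channels are hypothetical, matching a typical EfficientNet feature width): the channel count doubles because the average- and max-pooled descriptors are concatenated along dim 1, which is why the replacement fc uses in_features * 2.

pool = AdaptiveMaxAvgPool()
print(pool(torch.randn(2, 1280, 7, 7)).shape)  # torch.Size([2, 2560, 1, 1])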
Example #12
Source File: LinearClassifier.py From FeatureDecoupling with MIT License
def __init__(self, opt):
    super(Classifier, self).__init__()
    nChannels = opt['nChannels']
    num_classes = opt['num_classes']
    pool_size = opt['pool_size']
    pool_type = opt['pool_type'] if ('pool_type' in opt) else 'max'
    nChannelsAll = nChannels * pool_size * pool_size

    self.classifier = nn.Sequential()
    if pool_type == 'max':
        self.classifier.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
    elif pool_type == 'avg':
        self.classifier.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
    self.classifier.add_module('BatchNorm', nn.BatchNorm2d(nChannels, affine=False))
    self.classifier.add_module('Flatten', Flatten())
    self.classifier.add_module('LiniearClassifier', nn.Linear(nChannelsAll, num_classes))
    self.initilize()
Example #13
Source File: baseline.py From fast-reid with Apache License 2.0
def __init__(self, cfg):
    super().__init__()
    self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
    self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))
    self._cfg = cfg

    # backbone
    self.backbone = build_backbone(cfg)

    # head
    pool_type = cfg.MODEL.HEADS.POOL_LAYER
    if pool_type == 'avgpool':
        pool_layer = FastGlobalAvgPool2d()
    elif pool_type == 'maxpool':
        pool_layer = nn.AdaptiveMaxPool2d(1)
    elif pool_type == 'gempool':
        pool_layer = GeneralizedMeanPoolingP()
    elif pool_type == "avgmaxpool":
        pool_layer = AdaptiveAvgMaxPool2d()
    elif pool_type == "identity":
        pool_layer = nn.Identity()
    else:
        raise KeyError(f"{pool_type} is invalid, please choose from "
                       f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'.")

    in_feat = cfg.MODEL.HEADS.IN_FEAT
    num_classes = cfg.MODEL.HEADS.NUM_CLASSES
    self.heads = build_reid_heads(cfg, in_feat, num_classes, pool_layer)
Example #14
Source File: network.py From talking-heads with GNU General Public License v3.0
def __init__(self, gpu=None):
    super(Embedder, self).__init__()

    self.conv1 = ResidualBlockDown(6, 64)
    self.conv2 = ResidualBlockDown(64, 128)
    self.conv3 = ResidualBlockDown(128, 256)
    self.att = SelfAttention(256)
    self.conv4 = ResidualBlockDown(256, 512)
    self.conv5 = ResidualBlockDown(512, 512)
    self.conv6 = ResidualBlockDown(512, 512)
    self.pooling = nn.AdaptiveMaxPool2d((1, 1))

    self.apply(weights_init)

    self.gpu = gpu
    if gpu is not None:
        self.cuda(gpu)
Example #15
Source File: network.py From talking-heads with GNU General Public License v3.0
def __init__(self, training_videos, gpu=None):
    super(Discriminator, self).__init__()

    self.conv1 = ResidualBlockDown(6, 64)
    self.conv2 = ResidualBlockDown(64, 128)
    self.conv3 = ResidualBlockDown(128, 256)
    self.att = SelfAttention(256)
    self.conv4 = ResidualBlockDown(256, 512)
    self.conv5 = ResidualBlockDown(512, 512)
    self.conv6 = ResidualBlockDown(512, 512)
    self.res_block = ResidualBlock(512)
    self.pooling = nn.AdaptiveMaxPool2d((1, 1))

    self.W = nn.Parameter(torch.rand(512, training_videos).normal_(0.0, 0.02))
    self.w_0 = nn.Parameter(torch.rand(512, 1).normal_(0.0, 0.02))
    self.b = nn.Parameter(torch.rand(1).normal_(0.0, 0.02))

    self.apply(weights_init)

    self.gpu = gpu
    if gpu is not None:
        self.cuda(gpu)
Example #16
Source File: flops_counter.py From NAS-Benchmark with GNU General Public License v3.0
def is_supported_instance(module):
    if isinstance(module, (torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
                           torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear,
                           torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d,
                           torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d)):
        return True
    return False
Example #17
Source File: DPN.py From ReXCam with MIT License
def __init__(self, output_size=1, pool_type='avg'):
    super(AdaptiveAvgMaxPool2d, self).__init__()
    self.output_size = output_size
    self.pool_type = pool_type
    if pool_type == 'avgmaxc' or pool_type == 'avgmax':
        self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
    elif pool_type == 'max':
        self.pool = nn.AdaptiveMaxPool2d(output_size)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        self.pool = nn.AdaptiveAvgPool2d(output_size)
Example #18
Source File: archs.py From paiss with MIT License
def __init__(self, block, layers, fc_out, norm_features=True, aggregation='gem',
             dropout_p=None, gemp=3, without_fc=False):
    self.inplanes = 64
    self.norm_features = norm_features
    self.aggregation = aggregation
    self.without_fc = without_fc
    self.out_features = fc_out

    super(ResNet_RMAC, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    # Aggregation layer
    if aggregation is None:
        self.adpool = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
    elif aggregation == 'max':
        self.adpool = nn.AdaptiveMaxPool2d(output_size=1)
    elif aggregation == 'avg':
        self.adpool = nn.AdaptiveAvgPool2d(output_size=1)
    elif aggregation == 'gem':
        self.adpool = GeneralizedMeanPooling(norm_type=gemp, output_size=1)

    self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None

    # Final FC layer
    self.fc = nn.Linear(512 * block.expansion, fc_out) if not self.without_fc else None
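GeneralizedMeanPooling is defined elsewhere in the project; a hedged sketch of the usual generalized-mean (GeM) formulation it presumably follows, which interpolates between the 'avg' option (p = 1) and the 'max' option (p -> infinity):

def gem(x, p=3.0, eps=1e-6):
    # GeM pooling over the spatial dimensions: clamp for numerical safety,
    # raise to p, average, then take the p-th root.
    return x.clamp(min=eps).pow(p).mean(dim=(-2, -1), keepdim=True).pow(1.0 / p)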
Example #19
Source File: wsdan.py From WS-DAN.PyTorch with MIT License
def __init__(self, pool='GAP'):
    super(BAP, self).__init__()
    assert pool in ['GAP', 'GMP']
    if pool == 'GAP':
        self.pool = None
    else:
        self.pool = nn.AdaptiveMaxPool2d(1)
Example #20
Source File: cbam.py From pytorch-image-models with Apache License 2.0
def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
    super(ChannelAttn, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    self.fc1 = nn.Conv2d(channels, channels // reduction, 1, bias=False)
    self.act = act_layer(inplace=True)
    self.fc2 = nn.Conv2d(channels // reduction, channels, 1, bias=False)
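The forward pass is not included above; a hedged sketch of the standard CBAM channel-attention computation these layers suggest, in which both pooled descriptors pass through the shared fc1/act/fc2 bottleneck and their sum is squashed into per-channel gates (not necessarily the file's exact forward):

def forward(self, x):
    avg_attn = self.fc2(self.act(self.fc1(self.avg_pool(x))))
    max_attn = self.fc2(self.act(self.fc1(self.max_pool(x))))
    return x * torch.sigmoid(avg_attn + max_attn)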
Example #21
Source File: blocks.py From WS-DAN.PyTorch with MIT License
def __init__(self, channel, reduction=16, spatial_kernel=7):
    super(CBAMLayer, self).__init__()

    # channel attention
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    self.mlp = nn.Sequential(
        nn.Conv2d(channel, channel // reduction, 1, bias=False),
        nn.ReLU(inplace=True),
        nn.Conv2d(channel // reduction, channel, 1, bias=False),
    )

    # spatial attention
    self.conv = nn.Conv2d(2, 1, kernel_size=spatial_kernel,
                          padding=spatial_kernel // 2, bias=False)
    self.sigmoid = nn.Sigmoid()
Example #22
Source File: channle.py From Double-Branch-Dual-Attention-Mechanism-Network with GNU Affero General Public License v3.0
def __init__(self, in_planes, ratio=16):
    super(ChannelAttention, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    # Bottleneck width derives from the ratio argument.
    self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
    self.relu1 = nn.ReLU()
    self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
    self.sigmoid = nn.Sigmoid()