Python torch.nn.modules.conv._ConvNd() Examples
The following are 7 code examples of torch.nn.modules.conv._ConvNd().
Each example is taken from an open-source project; the original source file is named above each example.
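_ConvNd is the private base class shared by nn.Conv1d, nn.Conv2d, and nn.Conv3d, which makes it handy for isinstance checks that should match any convolution layer. A minimal sketch of the import the examples below rely on:

    import torch.nn as nn
    from torch.nn.modules.conv import _ConvNd

    conv = nn.Conv2d(3, 16, kernel_size=3)
    assert isinstance(conv, _ConvNd)  # any Conv1d/2d/3d instance passes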
Example #1
Source File: init.py From Holocron with MIT License
def init_module(module, nonlinearity=None):
    """Initializes pytorch modules

    Args:
        module (torch.nn.Module): module to initialize
        nonlinearity (str, optional): nonlinearity to initialize convolutions for
    """
    for m in module.modules():
        if isinstance(m, _ConvNd):
            nn.init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity=nonlinearity)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            m.weight.data.fill_(1.0)
            m.bias.data.zero_()
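A minimal usage sketch (the model below is hypothetical). Note that nonlinearity is forwarded to nn.init.kaiming_normal_, so in practice it should name a supported nonlinearity such as 'relu':

    import torch.nn as nn

    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.BatchNorm2d(16),
        nn.ReLU(),
    )
    init_module(model, nonlinearity='relu')  # Kaiming-initializes the conv, resets the BN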
Example #2
Source File: nconv.py From nconv with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',
             init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True):
    # Call _ConvNd constructor
    super(NConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
                                  padding, dilation, False, 0, groups, bias)

    self.eps = 1e-20
    self.pos_fn = pos_fn
    self.init_method = init_method

    # Initialize weights and bias
    self.init_parameters()

    if self.pos_fn is not None:
        EnforcePos.apply(self, 'weight', pos_fn)
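Construction then looks like a regular nn.Conv2d (a sketch, assuming the nconv project's NConv2d, EnforcePos, and init_parameters are in scope; the argument values are illustrative):

    nconv = NConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
    # pos_fn='softplus' (the default) re-parameterizes the weights through softplus,
    # keeping them positive as normalized convolution requires.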
Example #3
Source File: parrots_wrapper.py From mmcv with Apache License 2.0
def _get_conv():
    if TORCH_VERSION == 'parrots':
        from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    else:
        from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    return _ConvNd, _ConvTransposeMixin
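The returned classes are typically used for backend-agnostic isinstance checks, e.g. (a sketch; is_conv_layer is a hypothetical helper):

    _ConvNd, _ConvTransposeMixin = _get_conv()

    def is_conv_layer(m):
        # matches Conv1d/2d/3d (and their transposed variants) on either backend
        return isinstance(m, _ConvNd)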
Example #4
Source File: nn_helpers.py From margipose with Apache License 2.0
def init_parameters(net):
    for m in net.modules():
        if isinstance(m, _ConvNd):
            init.kaiming_normal_(m.weight, 0, 'fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            # Kaiming initialisation for linear layers
            init.normal_(m.weight, 0, sqrt(2.0 / m.weight.size(0)))
            if m.bias is not None:
                init.normal_(m.bias, 0, sqrt(2.0 / m.bias.size(0)))
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            if m.bias is not None:
                init.constant_(m.bias, 0)
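A usage sketch (the model is hypothetical):

    import torch.nn as nn

    net = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3),
        nn.BatchNorm2d(32),
        nn.Flatten(),
        nn.Linear(32 * 30 * 30, 10),  # sized for 3x32x32 inputs
    )
    init_parameters(net)  # re-initializes every conv, linear, and batchnorm in place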
Example #5
Source File: basic_hooks.py From pytorch-OpCounter with MIT License
def count_convNd(m: _ConvNd, x: (torch.Tensor,), y: torch.Tensor):
    x = x[0]

    kernel_ops = torch.zeros(m.weight.size()[2:]).numel()  # Kw x Kh
    bias_ops = 1 if m.bias is not None else 0

    # N x Cout x H x W x (Cin x Kw x Kh + bias)
    total_ops = y.nelement() * (m.in_channels // m.groups * kernel_ops + bias_ops)

    m.total_ops += torch.DoubleTensor([int(total_ops)])
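In pytorch-OpCounter this function runs as a forward hook; a minimal sketch of that wiring (the total_ops buffer must exist before the hook fires):

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(3, 16, kernel_size=3)
    conv.total_ops = torch.zeros(1, dtype=torch.float64)  # accumulator the hook updates
    conv.register_forward_hook(count_convNd)
    conv(torch.randn(1, 3, 32, 32))
    print(conv.total_ops)  # multiply-accumulate count for this forward pass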
Example #6
Source File: basic_hooks.py From pytorch-OpCounter with MIT License
def count_convNd_ver2(m: _ConvNd, x: (torch.Tensor,), y: torch.Tensor):
    x = x[0]

    # N x H x W (exclude Cout)
    output_size = torch.zeros((y.size()[:1] + y.size()[2:])).numel()
    # Cout x Cin x Kw x Kh
    kernel_ops = m.weight.nelement()
    if m.bias is not None:
        # Cout x 1
        kernel_ops += m.bias.nelement()
    # N x H x W x Cout x (Cin x Kw x Kh + bias)
    m.total_ops += torch.DoubleTensor([int(output_size * kernel_ops)])
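Both hooks count the same quantity, just factored differently: the first multiplies per-output-element MACs by y.nelement(), while this version multiplies the full kernel's element count by the spatial output size. A quick check (a sketch, reusing the wiring shown for count_convNd):

    conv2 = nn.Conv2d(3, 16, kernel_size=3)
    conv2.total_ops = torch.zeros(1, dtype=torch.float64)
    conv2.register_forward_hook(count_convNd_ver2)
    conv2(torch.randn(1, 3, 32, 32))
    print(conv2.total_ops)  # matches count_convNd's total for the same layer and input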
Example #7
Source File: util.py From PointWeb with MIT License
def init_weights(model, conv='kaiming', batchnorm='normal', linear='kaiming', lstm='kaiming'):
    """
    :param model: Pytorch Model which is nn.Module
    :param conv: 'kaiming' or 'xavier'
    :param batchnorm: 'normal' or 'constant'
    :param linear: 'kaiming' or 'xavier'
    :param lstm: 'kaiming' or 'xavier'
    """
    for m in model.modules():
        if isinstance(m, _ConvNd):
            if conv == 'kaiming':
                initer.kaiming_normal_(m.weight)
            elif conv == 'xavier':
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError("init type of conv error.\n")
            if m.bias is not None:
                initer.constant_(m.bias, 0)
        elif isinstance(m, _BatchNorm):
            if batchnorm == 'normal':
                initer.normal_(m.weight, 1.0, 0.02)
            elif batchnorm == 'constant':
                initer.constant_(m.weight, 1.0)
            else:
                raise ValueError("init type of batchnorm error.\n")
            initer.constant_(m.bias, 0.0)
        elif isinstance(m, nn.Linear):
            if linear == 'kaiming':
                initer.kaiming_normal_(m.weight)
            elif linear == 'xavier':
                initer.xavier_normal_(m.weight)
            else:
                raise ValueError("init type of linear error.\n")
            if m.bias is not None:
                initer.constant_(m.bias, 0)
        elif isinstance(m, nn.LSTM):
            for name, param in m.named_parameters():
                if 'weight' in name:
                    if lstm == 'kaiming':
                        initer.kaiming_normal_(param)
                    elif lstm == 'xavier':
                        initer.xavier_normal_(param)
                    else:
                        raise ValueError("init type of lstm error.\n")
                elif 'bias' in name:
                    initer.constant_(param, 0)
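The function presumably assumes initer is torch.nn.init and _BatchNorm comes from torch.nn.modules.batchnorm (which covers BatchNorm1d/2d/3d); a usage sketch under those imports:

    import torch.nn as nn
    import torch.nn.init as initer
    from torch.nn.modules.batchnorm import _BatchNorm
    from torch.nn.modules.conv import _ConvNd

    model = nn.Sequential(nn.Conv1d(8, 16, kernel_size=3), nn.BatchNorm1d(16))
    init_weights(model, conv='xavier', batchnorm='constant')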