Python torch.nn.init.normal() Examples
The following are 30 code examples of torch.nn.init.normal(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet. Note that normal() is the pre-0.4.0 spelling of this initializer: newer PyTorch releases deprecate it in favor of the in-place torch.nn.init.normal_(). You may also want to check out the other functions and classes of the torch.nn.init module.
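For orientation before the examples: the function fills an existing tensor in place with values drawn from a normal distribution N(mean, std). A minimal sketch using the modern normal_ spelling (the layer sizes are arbitrary):

import torch.nn as nn
from torch.nn import init

layer = nn.Linear(128, 64)                      # any module with weight/bias tensors
init.normal_(layer.weight, mean=0.0, std=0.02)  # in-place draw from N(0, 0.02**2)
init.constant_(layer.bias, 0.0)                 # biases are commonly zeroed alongside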
Example #1
Source File: networks_other.py From Attention-Gated-Networks with MIT License
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch',
             use_sigmoid=False, init_type='normal', gpu_ids=[]):
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
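A hedged usage sketch of define_D as defined above; the argument values are illustrative, not taken from the project:

netD = define_D(input_nc=3, ndf=64, which_model_netD='basic',
                norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[])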
Example #2
Source File: network.py From DMIT with MIT License
def weights_init(init_type='xavier'):
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('Norm') == 0:
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
    return init_fun
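Since weights_init returns a closure (init_fun) rather than initializing anything itself, the intended use is to hand that closure to Module.apply, which calls it on every submodule. A minimal sketch; the two-layer net is a made-up placeholder:

net = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3), nn.BatchNorm2d(16))
net.apply(weights_init('kaiming'))  # init_fun runs once per submodule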
Example #3
Source File: networks.py From cp-vton with MIT License
def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):
    super(FeatureExtraction, self).__init__()
    downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)
    model = [downconv, nn.ReLU(True), norm_layer(ngf)]
    for i in range(n_layers):
        in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512
        out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512
        downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)
        model += [downconv, nn.ReLU(True)]
        model += [norm_layer(out_ngf)]
    model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
    model += [norm_layer(512)]
    model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]
    self.model = nn.Sequential(*model)
    init_weights(self.model, init_type='normal')
Example #4
Source File: networks.py From DepthNets with MIT License
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch',
             use_sigmoid=False, init_type='normal', gpu_ids=[]):
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer,
                                  use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
Example #5
Source File: utils.py From mixup_pytorch with MIT License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)

# _, term_width = os.popen('stty size', 'r').read().split()
# term_width = int(term_width)
Example #6
Source File: networks.py From Recycle-GAN with MIT License
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch',
             use_sigmoid=False, init_type='normal', gpu_ids=[]):
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'pixel':
        netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer,
                                  use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
Example #7
Source File: embedding.py From Hash-Embeddings with MIT License
def reset_parameters(self,
                     init_shared=lambda x: normal(x, std=0.1),
                     init_importance=lambda x: normal(x, std=0.0005)):
    """Resets the trainable parameters."""
    def set_constant_row(parameters, iRow=0, value=0):
        """Return `parameters` with row `iRow` as a constant `value`."""
        data = parameters.data
        data[iRow, :] = value
        return torch.nn.Parameter(data, requires_grad=parameters.requires_grad)

    np.random.seed(self.seed)
    if self.seed is not None:
        torch.manual_seed(self.seed)

    # normal() fills the tensor in place and returns it, so the lambdas above
    # both initialize and hand back the weight
    self.shared_embeddings.weight = init_shared(self.shared_embeddings.weight)
    self.importance_weights.weight = init_importance(self.importance_weights.weight)

    if self.padding_idx is not None:
        # Unfortunately has to set weight to 0 even when padding_idx = 0
        self.shared_embeddings.weight = set_constant_row(self.shared_embeddings.weight)
        self.importance_weights.weight = set_constant_row(self.importance_weights.weight)

    self.shared_embeddings.weight.requires_grad = self.train_sharedEmbed
    self.importance_weights.weight.requires_grad = self.train_weight
Example #8
Source File: networks.py From Single-Image-Reflection-Removal-Beyond-Linearity with MIT License
def define_D(input_nc, ndf, which_model_netD, n_layers_D=3, norm='batch',
             use_sigmoid=False, init_type='normal', gpu_ids=[]):
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer,
                                   use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
Example #9
Source File: SDAE.py From DCC with MIT License
def __init__(self, dim, dropout=0.2, slope=0.0):
    super(SDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    self.dropmodule1 = nn.Dropout(p=dropout)
    self.dropmodule2 = nn.Dropout(p=dropout)
    self.loss = nn.MSELoss(size_average=True)
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # check the bias itself; the original tested m.bias.data, which is never None
                init.constant(m.bias, 0)
Example #10
Source File: extractSDAE.py From DCC with MIT License
def __init__(self, dim, slope=0.0):
    super(extractSDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # check the bias itself; the original tested m.bias.data, which is never None
                init.constant(m.bias, 0)
Example #11
Source File: networks.py From Single-Image-Reflection-Removal-Beyond-Linearity with MIT License
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch',
             use_dropout=False, init_type='normal', gpu_ids=[]):
    netG = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netG == 'reflrmnetwork':
        netG = ReflRmNetwork(input_nc, output_nc, ngf, norm_layer=norm_layer,
                             use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
Example #12
Source File: resnet.py From One-Example-Person-ReID with MIT License
def reset_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=0.001)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #13
Source File: train.py From pytorch-zssr with Apache License 2.0
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #14
Source File: shufflenetv2plus.py From LightNet with MIT License
def _initialize_weights(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=0.001)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #15
Source File: networks.py From Single-Image-Reflection-Removal-Beyond-Linearity with MIT License
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #16
Source File: networks.py From Single-Image-Reflection-Removal-Beyond-Linearity with MIT License
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #17
Source File: networks.py From Single-Image-Reflection-Removal-Beyond-Linearity with MIT License
def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #18
Source File: rfshufflenetv2plus.py From LightNet with MIT License
def _initialize_weights(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=0.001)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #19
Source File: resnet.py From open-reid with MIT License
def reset_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=0.001)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #20
Source File: misc.py From attention_branch_network with MIT License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #21
Source File: utils.py From mixup with BSD 3-Clause "New" or "Revised" License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #22
Source File: utils.py From YellowFin_Pytorch with Apache License 2.0
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #23
Source File: utils.py From Fixup with BSD 3-Clause "New" or "Revised" License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #24
Source File: utils.py From dl2 with MIT License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #25
Source File: config.py From FPN-Pytorch with MIT License
def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all cfg
    values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.). By default, this function will also
    mark the global cfg as immutable to prevent changing the global cfg settings
    during script execution (which can lead to hard to debug errors or code
    that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # create aliases for PyTorch versions older than 0.4.0, which lack the
        # trailing-underscore init names
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #26
Source File: misc.py From Batch-Instance-Normalization with MIT License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #27
Source File: config.py From Detectron.pytorch with MIT License
def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all cfg
    values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.). By default, this function will also
    mark the global cfg as immutable to prevent changing the global cfg settings
    during script execution (which can lead to hard to debug errors or code
    that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # create aliases for PyTorch versions older than 0.4.0, which lack the
        # trailing-underscore init names
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #28
Source File: network.py From Counting-ICCV-DSSINet with MIT License
def weights_normal_init(model, dev=0.01):
    if isinstance(model, list):
        for m in model:
            weights_normal_init(m, dev)
    else:
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, dev)
                if m.bias is not None:
                    m.bias.data.fill_(0.0)
            elif isinstance(m, nn.LSTM):
                for weight_set in m._all_weights:
                    for param in weight_set:
                        if 'weight' in param:
                            m.__getattr__(param).data.normal_(0.0, dev)
                        if 'bias' in param:
                            m.__getattr__(param).data.fill_(0.0)
            elif isinstance(m, _Conv2d_dilated):
                m.weight.data.copy_(m.weight.data.normal_(0.0, dev))
                if m.bias is not None:
                    m.bias.data.fill_(0.0)
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0.0, dev)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal(m.weight.data, 1.0, 0.02)
                init.constant(m.bias.data, 0.0)
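Because weights_normal_init recurses when handed a list, several networks can be initialized in one call. A usage sketch under the assumption that the project's imports (including _Conv2d_dilated) are in scope; the two nets are placeholders:

encoder = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
decoder = nn.Linear(8, 2)
weights_normal_init([encoder, decoder], dev=0.01)  # list input takes the recursive branch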
Example #29
Source File: utils.py From one-pixel-attack-pytorch with MIT License
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # the original `if m.bias:` is ambiguous for multi-element tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #30
Source File: tools.py From binseg_pytoch with Apache License 2.0
def initialize_weights(method='kaiming', *models):
    for model in models:
        for module in model.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) \
                    or isinstance(module, nn.Linear):
                if method == 'kaiming':
                    # note: the second positional argument of kaiming_normal is `a`
                    # (the leaky-ReLU slope), not a gain
                    init.kaiming_normal(module.weight.data, np.sqrt(2.0))
                elif method == 'xavier':
                    init.xavier_normal(module.weight.data, np.sqrt(2.0))
                elif method == 'orthogonal':
                    init.orthogonal(module.weight.data, np.sqrt(2.0))
                elif method == 'normal':
                    init.normal(module.weight.data, mean=0, std=0.02)
                if module.bias is not None:
                    init.constant(module.bias.data, 0)
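Since the networks are collected with *models after the keyword-defaulted method, callers must pass the method string first and then any number of models. A usage sketch, assuming the example's own imports (nn, init, np) are available; the nets are placeholders:

netG = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3))
netD = nn.Linear(8, 1)
initialize_weights('xavier', netG, netD)  # method first, then any number of models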