Python torch.nn.init.normal_() Examples
The following are 30 code examples of torch.nn.init.normal_(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.init, or try the search function.
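Before the examples, a quick refresher: torch.nn.init.normal_(tensor, mean=0.0, std=1.0) fills a tensor in place with values drawn from the normal distribution N(mean, std**2). A minimal sketch, using an illustrative layer that is not taken from any of the projects below:

import torch.nn as nn
import torch.nn.init as init

layer = nn.Linear(128, 10)                      # any module with weight/bias tensors works
init.normal_(layer.weight, mean=0.0, std=0.01)  # in-place draw from N(0, 0.01**2)
init.constant_(layer.bias, 0.0)                 # common companion: zero the bias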
Example #1
Source File: torch_utils.py From Pose2Seg with MIT License | 6 votes

def init_weights(m, mode='MSRAFill'):
    import torch.nn as nn
    import torch.nn.init as init
    from torchlab.nnlib.init import XavierFill, MSRAFill

    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        if mode == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif mode == 'MSRAFill':
            MSRAFill(m.weight)
        else:
            raise ValueError
        if m.bias is not None:
            init.constant_(m.bias, 0)
    if isinstance(m, nn.Linear):
        XavierFill(m.weight)
        init.constant_(m.bias, 0)
Example #2
Source File: GNNlikeCNN2015.py From Pytorch-Networks with MIT License | 6 votes

def _initialize_weights_norm(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.normal_(m.weight, std=0.01)
            if m.bias is not None:  # mobilenet conv2d doesn't add bias
                init.constant_(m.bias, 0.0)

    # the last layer of these blocks doesn't have a ReLU
    init.normal_(self.model1_1[8].weight, std=0.01)
    init.normal_(self.model1_2[8].weight, std=0.01)
    init.normal_(self.model2_1[12].weight, std=0.01)
    init.normal_(self.model3_1[12].weight, std=0.01)
    init.normal_(self.model4_1[12].weight, std=0.01)
    init.normal_(self.model5_1[12].weight, std=0.01)
    init.normal_(self.model6_1[12].weight, std=0.01)
    init.normal_(self.model2_2[12].weight, std=0.01)
    init.normal_(self.model3_2[12].weight, std=0.01)
    init.normal_(self.model4_2[12].weight, std=0.01)
    init.normal_(self.model5_2[12].weight, std=0.01)
    init.normal_(self.model6_2[12].weight, std=0.01)
Example #3
Source File: model.py From jdit with Apache License 2.0 | 6 votes

def _weight_init(self, m):
    if (m is None) or (not hasattr(m, "weight")):
        return

    if hasattr(m, "bias") and (m.bias is not None):  # hasattr first, so modules without a bias don't raise
        m.bias.data.zero_()

    if isinstance(m, Conv2d):
        self.init_fc(m.weight)
        # m.bias.data.zero_()
    elif isinstance(m, Linear):
        self.init_fc(m.weight)
        # m.bias.data.zero_()
    elif isinstance(m, ConvTranspose2d):
        self.init_fc(m.weight)
        # m.bias.data.zero_()
    elif isinstance(m, InstanceNorm2d):
        init.normal_(m.weight, 1.0, 0.02)
        # m.bias.data.fill_(0)
    elif isinstance(m, BatchNorm2d):
        init.normal_(m.weight, 1.0, 0.02)
        # m.bias.data.fill_(0)
    else:
        pass
Example #4
Source File: networks.py From densebody_pytorch with GNU General Public License v3.0 | 6 votes

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
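Helpers like the one above are typically called once on a freshly built network; a hedged usage sketch with a throwaway model (the module stack is illustrative, not from densebody_pytorch):

import torch.nn as nn
import torch.nn.init as init

net = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.BatchNorm2d(16),
)
init_weights(net, init_type='xavier')  # net.apply() walks every submodule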
Example #5
Source File: outputs.py From Parsing-R-CNN with MIT License | 6 votes

def __init__(self, dim_in):
    super().__init__()
    self.dim_in = dim_in
    self.cls_on = cfg.FAST_RCNN.CLS_ON
    self.reg_on = cfg.FAST_RCNN.REG_ON

    if self.cls_on:
        self.cls_score = nn.Linear(self.dim_in, cfg.MODEL.NUM_CLASSES)
        init.normal_(self.cls_score.weight, std=0.01)
        init.constant_(self.cls_score.bias, 0)
        # self.avgpool = nn.AdaptiveAvgPool2d(1)
    if self.reg_on:
        if cfg.FAST_RCNN.CLS_AGNOSTIC_BBOX_REG:  # bg and fg
            self.bbox_pred = nn.Linear(self.dim_in, 4 * 2)
        else:
            self.bbox_pred = nn.Linear(self.dim_in, 4 * cfg.MODEL.NUM_CLASSES)
        init.normal_(self.bbox_pred.weight, std=0.001)
        init.constant_(self.bbox_pred.bias, 0)
Example #6
Source File: utils.py From prediction-flow with MIT License | 6 votes

def init_weights(model):
    if isinstance(model, nn.Linear):
        if model.weight is not None:
            init.kaiming_uniform_(model.weight.data)
        if model.bias is not None:
            init.normal_(model.bias.data)
    elif isinstance(model, nn.BatchNorm1d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm2d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm3d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    else:
        pass
Example #7
Source File: networks.py From MeshCNN with MIT License | 6 votes

def init_weights(net, init_type, init_gain):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(init_func)
Example #8
Source File: utils.py From dgl with Apache License 2.0 | 6 votes

def weights_init(m):
    '''
    Code from https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5
    Usage:
        model = Model()
        model.apply(weights_init)
    '''
    if isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight.data)
        init.normal_(m.bias.data)
    elif isinstance(m, nn.GRUCell):
        for param in m.parameters():
            if len(param.shape) >= 2:
                init.orthogonal_(param.data)
            else:
                init.normal_(param.data)
Example #9
Source File: utils.py From dgl with Apache License 2.0 | 6 votes

def dgmg_message_weight_init(m):
    """
    This is similar to the function above, where we initialize linear
    layers from a normal distribution with std 1./10 as suggested by
    the author. This should only be used for the message passing
    functions, i.e. the fe's in the paper.
    """
    def _weight_init(m):
        if isinstance(m, nn.Linear):
            init.normal_(m.weight.data, std=1./10)
            init.normal_(m.bias.data, std=1./10)
        else:
            raise ValueError('Expected the input to be of type nn.Linear!')

    if isinstance(m, nn.ModuleList):
        for layer in m:
            layer.apply(_weight_init)
    else:
        m.apply(_weight_init)
Example #10
Source File: train_siamrpn.py From Siamese-RPN-pytorch with MIT License | 6 votes

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):  # this will apply to each layer
        classname = m.__class__.__name__
        # NB: nn.Conv2d's class name is 'Conv2d', so this lowercase 'conv' check
        # never matches standard conv layers; they keep their default init.
        if hasattr(m, 'weight') and (classname.find('conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for relu
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    # print('initialize network with %s' % init_type)
    net.apply(init_func)
Example #11
Source File: test_siamrpn.py From Siamese-RPN-pytorch with MIT License | 6 votes

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):  # this will apply to each layer
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for relu
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    # print('initialize network with %s' % init_type)
    net.apply(init_func)
Example #12
Source File: train_siamrpn.py From Siamese-RPN-pytorch with MIT License | 6 votes

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):  # this will apply to each layer
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')  # good for relu
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    # print('initialize network with %s' % init_type)
    net.apply(init_func)
Example #13
Source File: fast_rcnn_heads.py From FPN-Pytorch with MIT License | 5 votes

def _init_weights(self):
    init.normal_(self.cls_score.weight, std=0.01)
    init.constant_(self.cls_score.bias, 0)
    if cfg.FAST_RCNN.FOCAL_LOSS and cfg.FAST_RCNN.FL_INIT:
        self.cls_score.bias[0].data = torch.from_numpy(
            np.array(np.log((cfg.MODEL.NUM_CLASSES - 1) *
                            (1 - cfg.FAST_RCNN.PRIOR) / (cfg.FAST_RCNN.PRIOR))))
    init.normal_(self.bbox_pred.weight, std=0.001)
    init.constant_(self.bbox_pred.bias, 0)
Example #14
Source File: mask_rcnn_heads.py From FPN-Pytorch with MIT License | 5 votes

def _init_weights(self, m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        if cfg.MRCNN.CONV_INIT == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError
        if m.bias is not None:
            init.constant_(m.bias, 0)
Example #15
Source File: mask_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    if cfg.MRCNN.CONV_INIT == 'GaussianFill':
        init.normal_(self.upconv5.weight, std=0.001)
    elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
        mynn.init.MSRAFill(self.upconv5.weight)
    init.constant_(self.upconv5.bias, 0)
Example #16
Source File: keypoint_rcnn_heads.py From FPN-Pytorch with MIT License | 5 votes

def _init_weights(self):
    if cfg.KRCNN.USE_DECONV:
        init.normal_(self.deconv.weight, std=0.01)
        init.constant_(self.deconv.bias, 0)

    if cfg.KRCNN.CONV_INIT == 'GaussianFill':
        init.normal_(self.classify.weight, std=0.001)
    elif cfg.KRCNN.CONV_INIT == 'MSRAFill':
        mynn.init.MSRAFill(self.classify.weight)
    else:
        raise ValueError(cfg.KRCNN.CONV_INIT)
    init.constant_(self.classify.bias, 0)
Example #17
Source File: mask_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    if cfg.MRCNN.CONV_INIT == 'GaussianFill':
        init.normal_(self.upconv5.weight, std=0.001)
    elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
        mynn.init.MSRAFill(self.upconv5.weight)
    init.constant_(self.upconv5.bias, 0)
Example #18
Source File: mask_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self, m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        if cfg.MRCNN.CONV_INIT == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError
        if m.bias is not None:
            init.constant_(m.bias, 0)
Example #19
Source File: mask_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self, m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        if cfg.MRCNN.CONV_INIT == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError
        init.constant_(m.bias, 0)
Example #20
Source File: rpn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    init.normal_(self.RPN_conv.weight, std=0.01)
    init.constant_(self.RPN_conv.bias, 0)
    init.normal_(self.RPN_cls_score.weight, std=0.01)
    init.constant_(self.RPN_cls_score.bias, 0)
    init.normal_(self.RPN_bbox_pred.weight, std=0.01)
    init.constant_(self.RPN_bbox_pred.bias, 0)
Example #21
Source File: FPN.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    init.normal_(self.FPN_RPN_conv.weight, std=0.01)
    init.constant_(self.FPN_RPN_conv.bias, 0)
    init.normal_(self.FPN_RPN_cls_score.weight, std=0.01)
    init.constant_(self.FPN_RPN_cls_score.bias, 0)
    init.normal_(self.FPN_RPN_bbox_pred.weight, std=0.01)
    init.constant_(self.FPN_RPN_bbox_pred.bias, 0)
Example #22
Source File: keypoint_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self, m):
    if isinstance(m, nn.Conv2d):
        if cfg.KRCNN.CONV_INIT == 'GaussianFill':
            init.normal_(m.weight, std=0.01)
        elif cfg.KRCNN.CONV_INIT == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError('Unexpected cfg.KRCNN.CONV_INIT: {}'.format(cfg.KRCNN.CONV_INIT))
        init.constant_(m.bias, 0)
Example #23
Source File: keypoint_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    if cfg.KRCNN.USE_DECONV:
        init.normal_(self.deconv.weight, std=0.01)
        init.constant_(self.deconv.bias, 0)

    if cfg.KRCNN.CONV_INIT == 'GaussianFill':
        init.normal_(self.classify.weight, std=0.001)
    elif cfg.KRCNN.CONV_INIT == 'MSRAFill':
        mynn.init.MSRAFill(self.classify.weight)
    else:
        raise ValueError(cfg.KRCNN.CONV_INIT)
    init.constant_(self.classify.bias, 0)
Example #24
Source File: fast_rcnn_heads.py From Detectron.pytorch with MIT License | 5 votes

def _init_weights(self):
    init.normal_(self.cls_score.weight, std=0.01)
    init.constant_(self.cls_score.bias, 0)
    init.normal_(self.bbox_pred.weight, std=0.001)
    init.constant_(self.bbox_pred.bias, 0)
Example #25
Source File: GST.py From GST-Tacotron with MIT License | 5 votes

def __init__(self):
    super().__init__()
    self.embed = nn.Parameter(torch.FloatTensor(hp.token_num, hp.E // hp.num_heads))
    d_q = hp.E // 2
    d_k = hp.E // hp.num_heads
    # self.attention = MultiHeadAttention(hp.num_heads, d_model, d_q, d_v)
    self.attention = MultiHeadAttention(query_dim=d_q, key_dim=d_k,
                                        num_units=hp.E, num_heads=hp.num_heads)

    init.normal_(self.embed, mean=0, std=0.5)
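Unlike the layer-centric examples, this one applies normal_ directly to a freshly allocated nn.Parameter (the GST style-token embeddings). A reduced sketch of just that pattern, with illustrative dimensions standing in for hp.token_num and hp.E // hp.num_heads:

import torch
import torch.nn as nn
import torch.nn.init as init

embed = nn.Parameter(torch.empty(10, 32))  # torch.empty leaves values uninitialized
init.normal_(embed, mean=0, std=0.5)       # so every entry is drawn from N(0, 0.25)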
Example #26
Source File: model.py From Cross-Modal-Re-ID-baseline with MIT License | 5 votes

def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0, 0.001)
        if m.bias is not None:  # truth-testing a multi-element tensor would raise an error
            init.zeros_(m.bias.data)
Example #27
Source File: model.py From Cross-Modal-Re-ID-baseline with MIT License | 5 votes

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.zeros_(m.bias.data)
    elif classname.find('BatchNorm1d') != -1:
        init.normal_(m.weight.data, 1.0, 0.01)
        init.zeros_(m.bias.data)
Example #28
Source File: unet.py From Pytorch_Medical_Segmention_Template with MIT License | 5 votes

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)

# model = UNet()
# torchsummary.summary(model, (1, 512, 512))
Example #29
Source File: networks_pix2pixhd.py From iSketchNFill with GNU General Public License v3.0 | 5 votes

def init_weights(self, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm2d') != -1:
            if hasattr(m, 'weight') and m.weight is not None:
                init.normal_(m.weight.data, 1.0, gain)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'xavier_uniform':
                init.xavier_uniform_(m.weight.data, gain=1.0)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            elif init_type == 'none':
                # uses pytorch's default init method
                m.reset_parameters()
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)

    self.apply(init_func)

    # propagate to children
    for m in self.children():
        if hasattr(m, 'init_weights'):
            m.init_weights(init_type, gain)
Example #30
Source File: model.py From MobileNetV2-pytorch with MIT License | 5 votes

def init_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=0.001)
            if m.bias is not None:
                init.constant_(m.bias, 0)