Python torch.nn.init.uniform() Examples
The following are 30 code examples of torch.nn.init.uniform(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or browse the other functions and classes of the torch.nn.init module.
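Note that torch.nn.init.uniform() was deprecated in PyTorch 0.4.0 in favor of the in-place torch.nn.init.uniform_() (trailing underscore); several of the config.py examples below alias the two names so that code written against the newer API also runs on older installs. As a minimal sketch of the modern call, assuming PyTorch >= 0.4.0 (the layer sizes are hypothetical):

import torch.nn as nn

linear = nn.Linear(128, 64)  # hypothetical layer
# Fill the weight in place with samples from U(-0.1, 0.1).
nn.init.uniform_(linear.weight, a=-0.1, b=0.1)
# Biases are commonly zeroed rather than randomized.
nn.init.constant_(linear.bias, 0.0)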
Example #1
Source File: config.py From pcl.pytorch with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.VGG.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0, where the
        # initializers had no trailing underscore.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #2
Source File: layers.py From ASER with MIT License

def __init__(self, input_size, activation=nn.Tanh(), method="dot"):
    super(AttnScore, self).__init__()
    self.activation = activation
    self.input_size = input_size
    self.method = method
    if method == "general":
        self.linear = nn.Linear(input_size, input_size)
        init.uniform(self.linear.weight.data, -0.005, 0.005)
    elif method == "concat":
        self.linear_1 = nn.Linear(input_size * 2, input_size)
        self.linear_2 = nn.Linear(input_size, 1)
        init.uniform(self.linear_1.weight.data, -0.005, 0.005)
        init.uniform(self.linear_2.weight.data, -0.005, 0.005)
    elif method == "tri_concat":
        self.linear = nn.Linear(input_size * 3, 1)
        init.uniform(self.linear.weight.data, -0.005, 0.005)
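The deprecated init.uniform calls above map one-to-one onto the in-place API. A minimal sketch of the "general" branch written for PyTorch >= 0.4.0 (input_size is hypothetical):

import torch.nn as nn
from torch.nn import init

input_size = 256  # hypothetical
linear = nn.Linear(input_size, input_size)
# Same narrow +/-0.005 range as above; it keeps the initial attention logits near zero.
init.uniform_(linear.weight.data, -0.005, 0.005)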
Example #3
Source File: flow_models.py From hrnet with MIT License

def __init__(self, args, batchNorm=False, div_flow=20.):
    super(FlowNet2CS, self).__init__()
    self.batchNorm = batchNorm
    self.div_flow = div_flow
    self.rgb_max = args.rgb_max
    self.args = args

    self.channelnorm = ChannelNorm()

    # First Block (FlowNetC)
    self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
    self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    if args.fp16:
        self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
    else:
        self.resample1 = Resample2d()

    # Block (FlowNetS1)
    self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
    self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
        if isinstance(m, nn.ConvTranspose2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
            # init_deconv_bilinear(m.weight)
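The initialization loop above (Xavier-uniform weights, uniform biases) is a common pattern; here is a minimal standalone sketch of the same loop written against the in-place API, assuming PyTorch >= 0.4.0:

import torch.nn as nn
from torch.nn import init

def init_conv_weights(module):
    # Visit every submodule and initialize conv and deconv layers,
    # mirroring the loop in the example above.
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.uniform_(m.bias)  # defaults to U(0, 1), as in the example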
Example #4
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        # Note: the bounds here are (1.0, 0.02), i.e. low > high; recent PyTorch
        # rejects this, and init.normal(m.weight.data, 1.0, 0.02) was likely intended.
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #5
Source File: cnn_train.py From cgp-cnn-PyTorch with MIT License

def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #6
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_normal_(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #7
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1:
        m.apply(weights_init_normal_)
    elif classname.find('Linear') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #8
Source File: models.py From video-to-pose3D with MIT License

def __init__(self, args, batchNorm=False, div_flow=20.):
    super(FlowNet2CS, self).__init__()
    self.batchNorm = batchNorm
    self.div_flow = div_flow
    self.rgb_max = args.rgb_max
    self.args = args

    self.channelnorm = ChannelNorm()

    # First Block (FlowNetC)
    self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
    self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    if args.fp16:
        self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
    else:
        self.resample1 = Resample2d()

    # Block (FlowNetS1)
    self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
    self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
        if isinstance(m, nn.ConvTranspose2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
            # init_deconv_bilinear(m.weight)
Example #9
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_xavier(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #10
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_xavier(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #11
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #12
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #13
Source File: cnn_train.py From cgp-cnn-PyTorch with MIT License

def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1:
        m.apply(weights_init_normal_)
    elif classname.find('Linear') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #14
Source File: cnn_train.py From cgp-cnn-PyTorch with MIT License

def weights_init_normal_(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #15
Source File: cnn_train.py From cgp-cnn-PyTorch with MIT License

def weights_init_xavier(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #16
Source File: cnn_train.py From cgp-cnn-PyTorch with MIT License

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #17
Source File: cnn_train.py From Evolutionary-Autoencoders with MIT License

def weights_init_normal_(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.uniform(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #18
Source File: models.py From TreeEnc with MIT License

def reset_parameters(self):
    if self.use_batchnorm:
        self.bn_mlp_input.reset_parameters()
        self.bn_mlp_output.reset_parameters()
    for i in range(self.num_layers):
        linear_layer = self.mlp[i][0]
        init.kaiming_normal(linear_layer.weight.data)
        init.constant(linear_layer.bias.data, val=0)
    init.uniform(self.clf_linear.weight.data, -0.005, 0.005)
    init.constant(self.clf_linear.bias.data, val=0)
Example #19
Source File: config.py From DIoU-pytorch-detectron with GNU General Public License v3.0

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #20
Source File: networks.py From Bayesian-CycleGAN with MIT License

def weights_init_uniform(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.uniform(m.weight.data, -0.06, 0.06)
    elif classname.find('Linear') != -1:
        # The original repeated the 'Conv' test here, leaving this branch
        # unreachable; 'Linear' is almost certainly what was intended.
        init.uniform(m.weight.data, -0.06, 0.06)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 0.04, 1.06)
        init.constant(m.bias.data, 0.0)
Example #21
Source File: config.py From detectron-self-train with MIT License

def assert_and_infer_cfg(make_immutable=False):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #22
Source File: config.py From Large-Scale-VRD.pytorch with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS or __C.VGG16.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #23
Source File: config.py From PMFNet with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #24
Source File: model.py From NoisyNet-A3C with MIT License

def reset_parameters(self):
    # Only init after all params have been added (otherwise super().__init__() fails).
    if hasattr(self, 'sigma_weight'):
        init.uniform(self.weight, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
        init.uniform(self.bias, -math.sqrt(3 / self.in_features), math.sqrt(3 / self.in_features))
        init.constant(self.sigma_weight, self.sigma_init)
        init.constant(self.sigma_bias, self.sigma_init)
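The +/-sqrt(3 / in_features) bounds above give each weight a variance of 1 / in_features, since a uniform distribution on (-a, a) has variance a^2 / 3. A quick numerical check, with a hypothetical layer size:

import math
import torch

in_features = 64  # hypothetical
a = math.sqrt(3 / in_features)
w = torch.empty(512, in_features).uniform_(-a, a)
# The sample variance should be close to 1 / in_features = 0.015625.
print(w.var().item())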
Example #25
Source File: config.py From PANet with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #26
Source File: models.py From RCRNet-Pytorch with MIT License

def __init__(self, args, batchNorm=False, div_flow=20.):
    super(FlowNet2CS, self).__init__()
    self.batchNorm = batchNorm
    self.div_flow = div_flow
    self.rgb_max = args.rgb_max
    self.args = args

    self.channelnorm = ChannelNorm()

    # First Block (FlowNetC)
    self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
    self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    if args.fp16:
        self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
    else:
        self.resample1 = Resample2d()

    # Block (FlowNetS1)
    self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
    self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
        if isinstance(m, nn.ConvTranspose2d):
            if m.bias is not None:
                init.uniform(m.bias)
            init.xavier_uniform(m.weight)
            # init_deconv_bilinear(m.weight)
Example #27
Source File: config.py From Context-aware-ZSR with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.MODEL.TAGGING and __C.TEST.TAGGING:
        __C.TEST.USE_GT_PROPOSALS = True
    if __C.TEST.USE_GT_PROPOSALS:
        # TODO: Currently this will not affect the training; it will
        # potentially influence FPN if we do FPN.
        __C.MODEL.FASTER_RCNN = False
        __C.TEST.BBOX_REG = False
    if __C.MODEL.RELATION_COOCCUR:
        __C.MODEL.NUM_RELATIONS = 2
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON or __C.TEST.USE_GT_PROPOSALS:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS or __C.TRAIN.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #28
Source File: config.py From Detectron.pytorch with MIT License

def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all
    cfg values that are necessary (e.g., merging a config from a file,
    merging command line config options, etc.). By default, this function
    will also mark the global cfg as immutable to prevent changing the global
    cfg settings during script execution (which can lead to hard-to-debug
    errors or code that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pretrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # Create aliases for PyTorch versions earlier than 0.4.0.
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #29
Source File: weight_initialization.py From aerial_mtl with BSD 3-Clause "New" or "Revised" License

def init_weights(net, init_type='normal'):
    print('initialization method [%s]' % init_type)
    if init_type == 'normal':
        net.apply(weights_init_normal)
    elif init_type == 'uniform':
        net.apply(weights_init_uniform)
    elif init_type == 'xavier':
        net.apply(weights_init_xavier)
    elif init_type == 'kaiming':
        net.apply(weights_init_kaiming)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
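Usage is a single call after constructing the network, since net.apply() visits every submodule. A minimal sketch, assuming the weights_init_* helpers above are in scope and using a hypothetical model:

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))  # hypothetical
init_weights(model, init_type='kaiming')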
Example #30
Source File: weight_initialization.py From aerial_mtl with BSD 3-Clause "New" or "Revised" License

def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.uniform(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)