Python torch.nn.functional.leaky_relu() Examples

The following are 30 code examples of torch.nn.functional.leaky_relu(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module torch.nn.functional, or try the search function.
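For orientation, here is a minimal, self-contained sketch of the function itself before the project examples; the tensor values below are illustrative only:

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.0])
# The default negative slope is 0.01: negative inputs are scaled, not zeroed.
print(F.leaky_relu(x))                      # tensor([-0.0200, -0.0050,  0.0000,  1.0000])
print(F.leaky_relu(x, negative_slope=0.2))  # tensor([-0.4000, -0.1000,  0.0000,  1.0000])
# inplace=True overwrites x to save memory; only safe if x is not reused elsewhere.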
Example #1
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
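A minimal usage sketch (hypothetical: it assumes InPlaceABN is importable from the project's bn module and acts as a drop-in BatchNorm2d with a fused activation):

import torch
# from bn import InPlaceABN  # hypothetical import path; the class lives in the project's bn.py

abn = InPlaceABN(64, activation="leaky_relu", slope=0.01)
y = abn(torch.randn(8, 64, 32, 32))  # normalize, then apply leaky_relu in place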
Example #2
Source File: stylegan2.py    From StyleGAN2_PyTorch with MIT License
def forward(self, x):
        if self.bias is not None and self.mode != 'modulate':
            out = F.linear(x, self.weight * self.w_lrmul, self.bias * self.b_lrmul)
        elif self.bias is not None and self.mode == 'modulate':
            # original
            # out = F.linear(x, self.weight * self.w_lrmul, self.bias * self.b_lrmul) + 1
            # re-implement
            out = F.linear(x, self.weight * self.w_lrmul, self.bias * self.b_lrmul)
        else:
            out = F.linear(x, self.weight * self.w_lrmul)

        if self.act == 'lrelu':
            out = F.leaky_relu(out, 0.2, inplace=True)
            out = out * np.sqrt(2)  # original repo def_gain=np.sqrt(2).
            return out
        elif self.act == 'linear':
            return out

        return out 
Example #3
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #4
Source File: stylegan2.py    From StyleGAN2_PyTorch with MIT License
def forward(self, x):
        # Add bias.
        x += self.bias

        # Evaluate activation function.
        if self.act == "linear":
            pass
        elif self.act == 'lrelu':
            x = F.leaky_relu(x, self.alpha, inplace=True)
            x = x * np.sqrt(2)  # original repo def_gain=np.sqrt(2).

        # Scale by gain.
        if self.gain != 1:
            x = x * self.gain

        return x 
Example #5
Source File: bn.py    From ACAN with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #6
Source File: residual.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        if hasattr(self, "proj_conv"):
            residual = self.proj_conv(x)
            residual = self.proj_bn(residual)
        else:
            residual = x

        x = self.convs(x) + residual

        if self.convs.bn1.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.convs.bn1.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "elu":
            return functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.convs.bn1.activation))
Example #7
Source File: extractconvSDAE.py    From DCC with MIT License
def forward(self,x):
        encoded = x
        for i, (encoder,bencoder) in enumerate(zip(self.enc,self.benc)):
            if i == self.nlayers-1:
                encoded = encoded.view(encoded.size(0), -1)
            encoded = encoder(encoded)
            if i < self.nlayers-1:
                encoded = bencoder(encoded)
                encoded = F.leaky_relu(encoded, negative_slope=self.reluslope)
        out = encoded
        for i, (decoder,bdecoder) in reversed(list(enumerate(zip(self.dec,self.bdec)))):
            if i == self.nlayers-1:
                out = out.view(out.size(0), -1, 1, 1)
            out = decoder(out)
            if i:
                out = bdecoder(out)
                out = F.leaky_relu(out, negative_slope=self.reluslope)
        return encoded, out 
Example #8
Source File: cifar10_LeNet.py    From Deep-SAD-PyTorch with MIT License
def __init__(self, rep_dim=128):
        super().__init__()

        self.rep_dim = rep_dim

        self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu')) 
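A side note on the gain used above: nn.init.calculate_gain('leaky_relu') assumes PyTorch's default negative slope of 0.01, which evaluates to sqrt(2 / (1 + 0.01**2)) ≈ 1.414. A quick check:

import math
import torch.nn as nn

gain = nn.init.calculate_gain('leaky_relu')  # default negative_slope = 0.01
assert abs(gain - math.sqrt(2.0 / (1 + 0.01 ** 2))) < 1e-6
# If the network actually uses a different slope, pass it explicitly:
gain_02 = nn.init.calculate_gain('leaky_relu', 0.2)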
Example #9
Source File: model.py    From sagan-pytorch with Apache License 2.0
def __init__(self, n_class=10):
        super().__init__()

        def conv(in_channel, out_channel, stride=2,
                 self_attention=False):
            return ConvBlock(in_channel, out_channel, stride=stride,
                             bn=False, activation=leaky_relu,
                             upsample=False, self_attention=self_attention)

        self.conv = nn.Sequential(conv(3, 128),
                                  conv(128, 256),
                                  conv(256, 512, stride=1,
                                       self_attention=True),
                                  conv(512, 512),
                                  conv(512, 512),
                                  conv(512, 512))

        self.linear = spectral_init(nn.Linear(512, 1))

        self.embed = nn.Embedding(n_class, 512)
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.embed = spectral_norm(self.embed) 
Example #10
Source File: VisualizeAtariLearnedReward.py    From ICML2019-TREX with MIT License
def cum_return(self, traj):
        '''calculate cumulative return of trajectory'''
        sum_rewards = 0
        sum_abs_rewards = 0
        for x in traj:
            x = x.permute(0,3,1,2) #get into NCHW format
            x = F.leaky_relu(self.conv1(x))
            x = F.leaky_relu(self.conv2(x))
            x = F.leaky_relu(self.conv3(x))
            x = F.leaky_relu(self.conv4(x))
            x = x.view(-1, 784)
            x = F.leaky_relu(self.fc1(x))
            r = torch.sigmoid(self.fc2(x))
            sum_rewards += r
            sum_abs_rewards += torch.abs(r)
        return sum_rewards, sum_abs_rewards 
Example #11
Source File: correlation1d_cost.py    From DenseMatchingBenchmark with MIT License
def correlation1d_cost(reference_fm, target_fm, max_disp=192, start_disp=0, dilation=1, disp_sample=None,
                       kernel_size=1, stride=1, padding=0, dilation_patch=1,):
    # For a pixel of the left image at (x, y), this computes a correlation cost
    # volume against pixels of the right image at (xr, y), with xr ranging over
    # [x - (max_disp - 1), x + (max_disp - 1)]; only the left half of the search
    # range, xr in [x - (max_disp - 1), x], is kept below.
    # Note: SpatialCorrelationSampler comes from the spatial_correlation_sampler
    # extension package (from spatial_correlation_sampler import SpatialCorrelationSampler).
    correlation_sampler = SpatialCorrelationSampler(patch_size=(1, max_disp * 2 - 1),
                                                    kernel_size=kernel_size,
                                                    stride=stride, padding=padding,
                                                    dilation_patch=dilation_patch)
    # [B, 1, max_disp*2-1, H, W]
    out = correlation_sampler(reference_fm, target_fm)

    # [B, max_disp*2-1, H, W]
    out = out.squeeze(1)

    # [B, max_disp, H, W]: grab the left half of the search range
    out = out[:, :max_disp, :, :]

    cost = F.leaky_relu(out, negative_slope=0.1, inplace=True)

    return cost 
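A quick shape check for the function above (a sketch assuming the spatial_correlation_sampler package is installed; the sizes are illustrative):

import torch
left = torch.randn(1, 32, 64, 128)   # [B, C, H, W] left feature map
right = torch.randn(1, 32, 64, 128)  # right feature map
cost = correlation1d_cost(left, right, max_disp=16)
print(cost.shape)  # torch.Size([1, 16, 64, 128])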
Example #12
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #13
Source File: misc.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        inv_var = torch.rsqrt(self.running_var + self.eps)
        if self.affine:
            alpha = self.weight * inv_var
            beta = self.bias - self.running_mean * alpha
        else:
            alpha = inv_var
            beta = - self.running_mean * alpha

        x.mul_(alpha.view(self._broadcast_shape(x)))
        x.add_(beta.view(self._broadcast_shape(x)))

        if self.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
        elif self.activation == "elu":
            return functional.elu(x, alpha=self.activation_param, inplace=True)
        elif self.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.activation)) 
Example #14
Source File: LearnAtariRewardAGC.py    From ICML2019-TREX with MIT License
def cum_return(self, traj):
        '''calculate cumulative return of trajectory'''
        sum_rewards = 0
        sum_abs_rewards = 0
        x = traj.permute(0,3,1,2) #get into NCHW format
        #compute forward pass of reward network
        x = F.leaky_relu(self.conv1(x))
        x = F.leaky_relu(self.conv2(x))
        x = F.leaky_relu(self.conv3(x))
        x = F.leaky_relu(self.conv4(x))
        x = x.view(-1, 784)
        x = F.leaky_relu(self.fc1(x))
        r = self.fc2(x)
        sum_rewards += torch.sum(r)
        sum_abs_rewards += torch.sum(torch.abs(r))
        return sum_rewards, sum_abs_rewards 
Example #15
Source File: LearnAtariReward.py    From ICML2019-TREX with MIT License
def cum_return(self, traj):
        '''calculate cumulative return of trajectory'''
        sum_rewards = 0
        sum_abs_rewards = 0
        x = traj.permute(0,3,1,2) #get into NCHW format
        #compute forward pass of reward network (we parallelize across frames so batch size is length of partial trajectory)
        x = F.leaky_relu(self.conv1(x))
        x = F.leaky_relu(self.conv2(x))
        x = F.leaky_relu(self.conv3(x))
        x = F.leaky_relu(self.conv4(x))
        x = x.view(-1, 784)
        x = F.leaky_relu(self.fc1(x))
        r = self.fc2(x)
        sum_rewards += torch.sum(r)
        sum_abs_rewards += torch.sum(torch.abs(r))
        return sum_rewards, sum_abs_rewards 
Example #16
Source File: model.py    From mnist-svhn-transfer with MIT License
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out 
Example #17
Source File: bn.py    From openseg.pytorch with MIT License
def __repr__(self):
        rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
              ' affine={affine}, activation={activation}'
        if self.activation == "leaky_relu":
            rep += ', slope={slope})'
        else:
            rep += ')'
        return rep.format(name=self.__class__.__name__, **self.__dict__) 
Example #18
Source File: bn.py    From openseg.pytorch with MIT License
def forward(self, x):
        x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                                  self.training, self.momentum, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        else:
            return x 
Example #19
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def forward(self, x):
        x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                                  self.training, self.momentum, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        else:
            return x 
Example #20
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(ABN, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters() 
Example #21
Source File: model.py    From mnist-svhn-transfer with MIT License
def forward(self, x):
        out = F.leaky_relu(self.conv1(x), 0.05)    # (?, 64, 16, 16)
        out = F.leaky_relu(self.conv2(out), 0.05)  # (?, 128, 8, 8)
        out = F.leaky_relu(self.conv3(out), 0.05)  # (?, 256, 4, 4)
        out = self.fc(out).squeeze()
        return out 
Example #22
Source File: sft_arch.py    From BasicSR with Apache License 2.0
def forward(self, x):
        # x[0]: fea; x[1]: cond
        scale = self.SFT_scale_conv1(F.leaky_relu(self.SFT_scale_conv0(x[1]), 0.01, inplace=True))
        shift = self.SFT_shift_conv1(F.leaky_relu(self.SFT_shift_conv0(x[1]), 0.01, inplace=True))
        return x[0] * scale + shift 
Example #23
Source File: bn.py    From openseg.pytorch with MIT License
def __repr__(self):
        rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
              ' affine={affine}, activation={activation}'
        if self.activation == "leaky_relu":
            rep += ', slope={slope})'
        else:
            rep += ')'
        return rep.format(name=self.__class__.__name__, **self.__dict__) 
Example #24
Source File: bn.py    From openseg.pytorch with MIT License
def forward(self, x):
        x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                                  self.training, self.momentum, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        else:
            return x 
Example #25
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(ABN, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters() 
Example #26
Source File: utils.py    From person-reid-lib with MIT License
def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.leaky_relu(x, 0.1, inplace=True) 
Example #27
Source File: utils.py    From person-reid-lib with MIT License
def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.leaky_relu(x, 0.1, inplace=True) 
Example #28
Source File: model_resnet.py    From sagan-pytorch with Apache License 2.0
def leaky_relu(input):
    return F.leaky_relu(input, negative_slope=0.2) 
Example #29
Source File: model.py    From sagan-pytorch with Apache License 2.0
def leaky_relu(input):
    return F.leaky_relu(input, negative_slope=0.2) 
Example #30
Source File: model_resnet.py    From BigGAN-pytorch with Apache License 2.0
def leaky_relu(input):
    return F.leaky_relu(input, negative_slope=0.2)