Python torch.nn.functional.conv_transpose2d() Examples

The following are 30 code examples of torch.nn.functional.conv_transpose2d(), gathered from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
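For reference, here is a minimal self-contained call before the project examples. Note the weight layout: conv_transpose2d expects (in_channels, out_channels/groups, kH, kW), the reverse of conv2d. The shapes below are illustrative only.

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)   # (N, C_in, H, W)
w = torch.randn(8, 4, 3, 3)     # (C_in, C_out, kH, kW) -- reversed relative to conv2d
y = F.conv_transpose2d(x, w, stride=2, padding=1, output_padding=1)
print(y.shape)                  # torch.Size([1, 4, 32, 32])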
Example #1
Source File: invertible_resnet.py    From FrEIA with MIT License
def lipschitz_correction(self):
        with torch.no_grad():
            # Power method to approximate spectral norm
            # Following https://arxiv.org/pdf/1804.04368.pdf
            for i in range(len(self.layers)):
                W = self.layers[i].weight
                x = torch.randn(self.lipschitz_batchsize, W.shape[1], *self.dims_in[1:], device=W.device)

                if len(self.dims_in) == 1:
                    # Linear case
                    for j in range(self.lipschitz_iterations):
                        x = W.t().matmul(W.matmul(x.unsqueeze(-1))).squeeze(-1)
                    spectral_norm = (torch.norm(W.matmul(x.unsqueeze(-1)).squeeze(-1), dim=1) /
                                     torch.norm(x, dim=1)).max()
                else:
                    # Convolutional case
                    for j in range(self.lipschitz_iterations):
                        x = conv2d(x, W)
                        x = conv_transpose2d(x, W)
                    spectral_norm = (torch.norm(conv2d(x, W).view(self.lipschitz_batchsize, -1), dim=1) /
                                     torch.norm(x.view(self.lipschitz_batchsize, -1), dim=1)).max()

                if spectral_norm > self.spectral_norm_max:
                    self.layers[i].weight.data *= self.spectral_norm_max / spectral_norm 
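The loop in the convolutional branch alternates conv2d and conv_transpose2d, which applies W^T W to x; the final norm ratio is therefore a power-method estimate of the largest singular value of the convolution operator. A stripped-down sketch of the same idea (shapes are invented, and a renormalization step is added for numerical stability, which the snippet above omits):

import torch
import torch.nn.functional as F

def conv_spectral_norm(W, in_shape, n_iter=10):
    # Power iteration on W^T W for the operator x -> conv2d(x, W)
    # (defaults assumed: stride 1, no padding).
    x = torch.randn(1, W.shape[1], *in_shape)
    for _ in range(n_iter):
        x = F.conv_transpose2d(F.conv2d(x, W), W)   # apply W^T W
        x = x / x.norm()                            # renormalize for stability
    return F.conv2d(x, W).norm() / x.norm()

W = torch.randn(16, 8, 3, 3)
print(conv_spectral_norm(W, (32, 32)).item())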
Example #2
Source File: pggan_generator_model.py    From interfacegan with MIT License
def forward(self, x):
    x = self.pixel_norm(x)
    x = self.upsample(x)
    if hasattr(self, 'conv'):
      x = self.conv(x)
    else:
      kernel = self.weight * self.scale
      kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
      kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
                kernel[1:, :-1] + kernel[:-1, :-1])
      kernel = kernel.permute(2, 3, 0, 1)
      x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
      x = x / self.scale
    x = self.wscale(x)
    x = self.activate(x)
    return x 
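The else branch implements PGGAN's fused upscale2d_conv2d: the 3x3 kernel, stored TF-style as (kH, kW, in_channels, out_channels), is zero-padded and summed over four one-pixel shifts, so a single stride-2 transposed convolution reproduces nearest-neighbor 2x upsampling followed by a 3x3 convolution. A numeric check of that equivalence (my construction, not from the project; note the fused kernel corresponds to the spatially flipped filter):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
cin, cout = 3, 5
w = torch.randn(3, 3, cin, cout)          # TF-style layout, like self.weight above
x = torch.randn(1, cin, 8, 8)

# Fused path, exactly as in forward() above.
k = F.pad(w, (0, 0, 0, 0, 1, 1, 1, 1))
k = k[1:, 1:] + k[:-1, 1:] + k[1:, :-1] + k[:-1, :-1]
k = k.permute(2, 3, 0, 1)                 # (in, out, 4, 4)
y_fused = F.conv_transpose2d(x, k, stride=2, padding=1)

# Reference path: nearest 2x upsample, then an ordinary 3x3 conv with the
# spatially flipped filter.
w_ref = w.flip(0, 1).permute(3, 2, 0, 1)  # (out, in, 3, 3)
y_ref = F.conv2d(F.interpolate(x, scale_factor=2, mode='nearest'), w_ref, padding=1)
print((y_fused - y_ref).abs().max())      # ~1e-6, i.e. float noise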
Example #3
Source File: quaternion_ops.py    From Pytorch-Quaternion-Neural-Networks with GNU General Public License v3.0
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                    padding, output_padding, groups, dilatation):
    """
    Applies a quaternion trasposed convolution to the incoming data:

    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)


    if   input.dim() == 3:
        convfunc = F.conv_transpose1d
    elif input.dim() == 4:
        convfunc = F.conv_transpose2d
    elif input.dim() == 5:
        convfunc = F.conv_transpose3d
    else:
        raise Exception("The convolutional input must have 3, 4 or 5 dimensions."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, output_padding, groups, dilatation) 
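A hypothetical call, just to make the shape bookkeeping concrete: each component weight is (in_q, out_q, kH, kW), the two torch.cat calls assemble a (4*in_q, 4*out_q, kH, kW) Hamilton-product kernel, and the input stacks the four quaternion components channel-wise. All names and shapes below are invented:

import torch

in_q, out_q, ksz = 2, 3, 3
r, i, j, kq = (torch.randn(in_q, out_q, ksz, ksz) for _ in range(4))

x = torch.randn(1, 4 * in_q, 16, 16)     # r/i/j/k parts stacked along channels
y = quaternion_transpose_conv(x, r, i, j, kq, bias=None, stride=2,
                              padding=1, output_padding=1, groups=1, dilatation=1)
print(y.shape)                           # torch.Size([1, 12, 32, 32])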
Example #4
Source File: pggan_generator_network.py    From higan with MIT License
def forward(self, x):
    x = self.pixel_norm(x)
    x = self.upsample(x)
    if self.use_conv2d_transpose:
      kernel = self.weight * self.scale
      kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
      kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
                kernel[1:, :-1] + kernel[:-1, :-1])
      kernel = kernel.permute(2, 3, 0, 1)
      x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
      x = x / self.scale
    else:
      x = self.conv(x)
    x = self.wscale(x)
    x = self.activate(x)
    return x 
Example #5
Source File: custom_layers.py    From ai-platform with MIT License
def forward(self, x):
        # change to in_channels, out_channels, kernel_size, kernel_size
        weight = self.weight.permute([1, 0, 2, 3])
        weight = F.pad(weight, [1, 1, 1, 1])
        weight = (weight[:, :, 1:, 1:]
                  + weight[:, :, :-1, 1:]
                  + weight[:, :, 1:, :-1]
                  + weight[:, :, :-1, :-1]
                 )
        x = F.conv_transpose2d(x,
                               weight,
                               self.bias,  # note: if bias was set to False, this is None
                               stride=2,
                               padding=self.padding)
        return x

# TODO: this needs to be better wrapped by ConstrainedLayer for bias
Example #6
Source File: network4att_test.py    From PiCANet-Implementation with MIT License
def forward(self, *input):
        x = input[0]
        size = x.size()
        kernel = self.renet(x)
        kernel = F.softmax(kernel, 1)
        # print(kernel.size())
        x = F.unfold(x, [10, 10], dilation=[3, 3])
        x = x.reshape(size[0], size[1], 10 * 10)
        kernel = kernel.reshape(size[0], 100, -1)
        x = torch.matmul(x, kernel)
        x = x.reshape(size[0], size[1], size[2], size[3])

        # for attention visualization

        # print(torch.cuda.memory_allocated() / 1024 / 1024)
        attention = kernel.data
        attention = attention.requires_grad_(False)
        attention = torch.reshape(attention, (size[0], -1, 10, 10))
        # attention = F.conv_transpose2d(torch.ones((1, 1, 1, 1)).cuda(), attention, dilation=3)
        attention = F.interpolate(attention, 224, mode='bilinear', align_corners=True)
        # attention = F.interpolate(attention, 224, mode='area')
        attention = torch.reshape(attention, (size[0], size[2], size[3], 224, 224))
        return x, attention 
Example #7
Source File: reshapes.py    From FrEIA with MIT License
def forward(self, x, rev=False):
        if not rev:
            self.last_jac = self.elements / 4 * (np.log(16.) + 4 * np.log(self.fac_fwd))
            out = F.conv2d(x[0], self.haar_weights,
                           bias=None, stride=2, groups=self.in_channels)
            if self.permute:
                return [out[:, self.perm] * self.fac_fwd]
            else:
                return [out * self.fac_fwd]

        else:
            self.last_jac = self.elements / 4 * (np.log(16.) + 4 * np.log(self.fac_rev))
            if self.permute:
                x_perm = x[0][:, self.perm_inv]
            else:
                x_perm = x[0]

            return [F.conv_transpose2d(x_perm * self.fac_rev, self.haar_weights,
                                     bias=None, stride=2, groups=self.in_channels)] 
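The reverse pass works because the Haar filters form an orthogonal basis: with non-overlapping stride-2 windows, conv_transpose2d with the same weights exactly inverts the stride-2 conv2d. A single-channel sketch of that property (the layer above additionally permutes channels and rescales by fac_fwd/fac_rev):

import torch
import torch.nn.functional as F

# 2x2 Haar filters scaled by 0.5 form an orthonormal basis of each patch.
h = 0.5 * torch.tensor([[[[ 1.,  1.], [ 1.,  1.]]],
                        [[[ 1., -1.], [ 1., -1.]]],
                        [[[ 1.,  1.], [-1., -1.]]],
                        [[[ 1., -1.], [-1.,  1.]]]])   # (4, 1, 2, 2)

x = torch.randn(1, 1, 8, 8)
y = F.conv2d(x, h, stride=2)                 # (1, 4, 4, 4): per-patch coefficients
x_rec = F.conv_transpose2d(y, h, stride=2)   # exact reconstruction
print(torch.allclose(x, x_rec, atol=1e-6))   # True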
Example #8
Source File: basic.py    From ffjord with MIT License
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):
        super(HyperConv2d, self).__init__()
        assert dim_in % groups == 0 and dim_out % groups == 0, "dim_in and dim_out must both be divisible by groups."
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.ksize = ksize
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.transpose = transpose

        self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
        if self.bias:
            self.params_dim += dim_out
        self._hypernet = nn.Linear(1, self.params_dim)
        self.conv_fn = F.conv_transpose2d if transpose else F.conv2d

        self._hypernet.apply(weights_init) 
Example #9
Source File: filter.py    From pytracking with GNU General Public License v3.0
def _apply_feat_transpose_v1(feat, input, filter_ksz):
    """This one is slow as hell!!!!"""

    num_images = feat.shape[0]
    num_sequences = feat.shape[1] if feat.dim() == 5 else 1
    feat_sz = (feat.shape[-2], feat.shape[-1])
    if isinstance(filter_ksz, int):
        filter_ksz = (filter_ksz, filter_ksz)

    # trans_pad = sz + padding - filter_ksz
    trans_pad = [sz + ksz//2 - ksz for sz, ksz in zip(feat_sz, filter_ksz)]

    filter_grad = F.conv_transpose2d(input.flip((2, 3)).view(1, -1, input.shape[-2], input.shape[-1]),
                                     feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]),
                                     padding=trans_pad, groups=num_images * num_sequences)

    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0) 
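The trick works because conv_transpose2d is the adjoint of conv2d with the same weight, stride and padding (it is the gradient of conv2d with respect to its input), so it can be repurposed to accumulate filter gradients. A quick numeric check of the adjoint identity <conv2d(x, W), y> = <x, conv_transpose2d(y, W)>, with arbitrary shapes:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 16, 16)
W = torch.randn(5, 3, 3, 3)
y = torch.randn(2, 5, 16, 16)   # same shape as conv2d(x, W, padding=1)

lhs = (F.conv2d(x, W, padding=1) * y).sum()
rhs = (x * F.conv_transpose2d(y, W, padding=1)).sum()
print((lhs - rhs).abs() / lhs.abs())   # tiny: float32 noise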
Example #10
Source File: pac.py    From openseg.pytorch with MIT License
def pacconv_transpose2d(input, kernel, weight, bias=None, stride=1, padding=0, output_padding=0, dilation=1,
                        shared_filters=False, native_impl=False):
    kernel_size = tuple(weight.shape[-2:])
    stride = _pair(stride)
    padding = _pair(padding)
    output_padding = _pair(output_padding)
    dilation = _pair(dilation)

    if native_impl:
        ch = input.shape[1]
        w = input.new_ones((ch, 1, 1, 1))
        x = F.conv_transpose2d(input, w, stride=stride, groups=ch)
        pad = [(kernel_size[i] - 1) * dilation[i] - padding[i] for i in range(2)]
        x = F.pad(x, (pad[1], pad[1] + output_padding[1], pad[0], pad[0] + output_padding[0]))
        output = pacconv2d(x, kernel, weight.permute(1, 0, 2, 3), bias, dilation=dilation,
                           shared_filters=shared_filters, native_impl=True)
    else:
        output = PacConvTranspose2dFn.apply(input, kernel, weight, bias, stride, padding, output_padding, dilation,
                                            shared_filters)

    return output 
Example #11
Source File: quaternion_ops.py    From Quaternion-Recurrent-Neural-Networks with GNU General Public License v3.0
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride, 
                    padding, output_padding, groups, dilatation):
    """
    Applies a quaternion trasposed convolution to the incoming data:

    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)


    if   input.dim() == 3:
        convfunc = F.conv_transpose1d
    elif input.dim() == 4:
        convfunc = F.conv_transpose2d
    elif input.dim() == 5:
        convfunc = F.conv_transpose3d
    else:
        raise Exception("The convolutional input must have 3, 4 or 5 dimensions."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, output_padding, groups, dilatation) 
Example #12
Source File: adf.py    From SalsaNext with MIT License
def forward(self, inputs_mean, inputs_variance, output_size=None):
        output_padding = self._output_padding(inputs_mean, output_size, self.stride, self.padding, self.kernel_size)
        outputs_mean = F.conv_transpose2d(
            inputs_mean, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
        outputs_variance = F.conv_transpose2d(
            inputs_variance, self.weight ** 2, None, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return outputs_mean, outputs_variance 
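The variance path uses squared weights and no bias because, for independent inputs, Var(sum_i w_i x_i) = sum_i w_i^2 Var(x_i), and a transposed convolution is exactly such a linear combination. A Monte-Carlo sanity sketch of that propagation rule, with invented shapes:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
W = torch.randn(4, 2, 3, 3)
mean = torch.randn(1, 4, 8, 8)
var = torch.rand(1, 4, 8, 8)

# Analytic propagation, mirroring the variance path above.
out_var = F.conv_transpose2d(var, W ** 2, None, 2, 1)

# Monte-Carlo estimate: push samples through the same (linear) map.
samples = mean + var.sqrt() * torch.randn(20000, 4, 8, 8)
mc_var = F.conv_transpose2d(samples, W, None, 2, 1).var(dim=0, keepdim=True)
print((out_var - mc_var).abs().max())   # small: ~1% sampling noise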
Example #13
Source File: stylegan_generator_network.py    From higan with MIT License
def forward(self, x, w):
    if self.fused_scale:
      kernel = self.weight * self.scale
      kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
      kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
                kernel[1:, :-1] + kernel[:-1, :-1])
      kernel = kernel.permute(2, 3, 0, 1)
      x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
    else:
      x = self.upsample(x)
      x = self.conv(x) * self.scale
    x = self.blur(x)
    x = self.epilogue(x, w)
    return x 
Example #14
Source File: fcn.py    From revolver with BSD 2-Clause "Simplified" License
def forward(self, x):
        # no groups (for speed with current pytorch impl.) and no bias
        return F.conv_transpose2d(x, self.weight, stride=self.rate) 
Example #15
Source File: stylegan_generator_model.py    From interfacegan with MIT License
def forward(self, x, w):
    if self.fused_scale:
      kernel = self.weight * self.scale
      kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)
      kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +
                kernel[1:, :-1] + kernel[:-1, :-1])
      kernel = kernel.permute(2, 3, 0, 1)
      x = F.conv_transpose2d(x, kernel, stride=2, padding=1)
    else:
      x = self.upsample(x)
      x = self.conv(x) * self.scale
    x = self.blur(x)
    x = self.epilogue(x, w)
    return x 
Example #16
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_conv_transpose2d(self):
        # Data and weight tensors
        conv_transpose2d_tensor = torch.randn(64, 8, 5, 5, device='cuda', dtype=self.dtype)
        conv_transpose2d_filter = torch.randn(8, 16, 3, 3, device='cuda', dtype=self.dtype)
        conv_transpose2d_bias = torch.randn(16, device='cuda', dtype=self.dtype)
        # Conv transpose runs
        conv_transpose2d_out = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter)
        conv_transpose2d_out_biased = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, bias=conv_transpose2d_bias)
        conv_transpose2d_out_strided = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, stride=2)
        conv_transpose2d_out_padded = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, padding=3)
        conv_transpose2d_out2_padded = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, output_padding=2, dilation=3)
        conv_transpose2d_out_grouped = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, groups=2)
        conv_transpose2d_out_dilated = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, dilation=2) 
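All of the strided/padded/dilated variants above follow the documented output-size formula H_out = (H_in - 1)*stride - 2*padding + dilation*(kH - 1) + output_padding + 1 (and likewise for width). A small helper (the name is mine) that predicts the size for the same tensor shapes, on CPU:

import torch
import torch.nn.functional as F

def transpose_out_size(n, k, stride=1, padding=0, output_padding=0, dilation=1):
    # Documented nn.ConvTranspose2d output size, per spatial dimension.
    return (n - 1) * stride - 2 * padding + dilation * (k - 1) + output_padding + 1

x = torch.randn(1, 8, 5, 5)
w = torch.randn(8, 16, 3, 3)
for kwargs in ({'stride': 2}, {'padding': 3}, {'output_padding': 2, 'dilation': 3}):
    out = F.conv_transpose2d(x, w, **kwargs)
    assert out.shape[-1] == transpose_out_size(5, 3, **kwargs)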
Example #17
Source File: pixelcnn_model.py    From ssl_bad_gan with MIT License
def forward(self, input, output_size=None):
        if self.train_scale:
            weight_scale = self.weight_scale
        else:
            weight_scale = Variable(self.weight_scale)
        # normalize weight matrix and linear projection [in x out x h x w]
        # for each output dimension, normalize through (in, h, w)  = (0, 2, 3) dims
        norm_weight = self.weight * (weight_scale[None,:,None,None] / torch.sqrt((self.weight ** 2).sum(3).sum(2).sum(0) + 1e-6)).expand_as(self.weight)
        output_padding = self._output_padding(input, output_size)
        if old_version:
            bias = self.bias
        else:
            bias = None
        activation = F.conv_transpose2d(input, norm_weight, bias=bias, 
                                        stride=self.stride, padding=self.padding, 
                                        output_padding=output_padding, groups=self.groups)

        if self.init_mode:
            mean_act = activation.mean(3).mean(2).mean(0).squeeze()
            activation = activation - mean_act[None,:,None,None].expand_as(activation)

            inv_stdv = self.init_stdv / torch.sqrt((activation ** 2).mean(3).mean(2).mean(0) + 1e-8).squeeze()
            activation = activation * inv_stdv[None,:,None,None].expand_as(activation)

            if self.train_scale:
                self.weight_scale.data = self.weight_scale.data * inv_stdv.data
            else:
                self.weight_scale = self.weight_scale * inv_stdv.data
            self.bias.data = - mean_act.data * inv_stdv.data

        else:
            if self.bias is not None:
                activation = activation + self.bias[None,:,None,None].expand_as(activation)

        return activation 
Example #18
Source File: wideresnet.py    From meta-weight-net with MIT License
def forward(self, x, output_size=None):
        output_padding = self._output_padding(x, output_size)
        return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,
                                  output_padding, self.groups, self.dilation) 
Example #19
Source File: stn.py    From istn with Apache License 2.0
def compute_displacement(self, params):
        # compute dense displacement
        displacement = F.conv_transpose2d(params, self.kernel,
                                          padding=self.padding, stride=self.stride, groups=2)

        # crop displacement
        displacement = displacement[:, :,
                       self.control_point_spacing[0] + self.crop_start[0]:-self.control_point_spacing[0] -
                                                                          self.crop_end[0],
                       self.control_point_spacing[1] + self.crop_start[1]:-self.control_point_spacing[1] -
                                                                          self.crop_end[1]]

        return displacement.permute(0, 2, 3, 1) 
Example #20
Source File: CustomLayers.py    From pro_gan_pytorch with MIT License
def forward(self, x):
        """
        forward pass of the layer
        :param x: input
        :return: y => output
        """
        from torch.nn.functional import conv_transpose2d

        return conv_transpose2d(input=x,
                                weight=self.weight * self.scale,  # scale the weight on runtime
                                bias=self.bias if self.use_bias else None,
                                stride=self.stride,
                                padding=self.pad) 
Example #21
Source File: stylegan2_generator_network.py    From higan with MIT License
def forward(self, x):
    weight = self.weight * self.weight_scale * self.lr_multiplier
    if self.scale_factor > 1:
      weight = weight.flip(0, 1).permute(2, 3, 0, 1)
      x = F.conv_transpose2d(x, weight, stride=self.scale_factor, padding=0)
      x = self.filter(x)
    else:
      weight = weight.permute(3, 2, 0, 1)
      x = F.conv2d(x, weight, stride=1, padding=self.conv_padding)

    if self.add_bias:
      bias = self.bias * self.lr_multiplier
      x = x + bias.view(1, -1, 1, 1)
    x = self.activate(x) * self.activate_scale
    return x 
Example #22
Source File: model.py    From ssl_bad_gan with MIT License
def forward(self, input, output_size=None):
        if self.train_scale:
            weight_scale = self.weight_scale
        else:
            weight_scale = Variable(self.weight_scale)
        # normalize weight matrix and linear projection [in x out x h x w]
        # for each output dimension, normalize through (in, h, w)  = (0, 2, 3) dims
        norm_weight = self.weight * (weight_scale[None,:,None,None] / torch.sqrt((self.weight ** 2).sum(3).sum(2).sum(0) + 1e-6)).expand_as(self.weight)
        output_padding = self._output_padding(input, output_size)
        activation = F.conv_transpose2d(input, norm_weight, bias=None, 
                                        stride=self.stride, padding=self.padding, 
                                        output_padding=output_padding, groups=self.groups)

        if self.init_mode:
            mean_act = activation.mean(3).mean(2).mean(0).squeeze()
            activation = activation - mean_act[None,:,None,None].expand_as(activation)

            inv_stdv = self.init_stdv / torch.sqrt((activation ** 2).mean(3).mean(2).mean(0) + 1e-6).squeeze()
            activation = activation * inv_stdv[None,:,None,None].expand_as(activation)

            if self.train_scale:
                self.weight_scale.data = self.weight_scale.data * inv_stdv.data
            else:
                self.weight_scale = self.weight_scale * inv_stdv.data
            self.bias.data = - mean_act.data * inv_stdv.data

        else:
            if self.bias is not None:
                activation = activation + self.bias[None,:,None,None].expand_as(activation)

        return activation 
Example #23
Source File: models.py    From fast-depth with MIT License
def forward(self, x):
        assert x.dim() == 4
        num_channels = x.size(1)
        return F.conv_transpose2d(x,
            self.mask.detach().type_as(x).expand(num_channels, 1, -1, -1),
            stride=self.stride, groups=num_channels) 
Example #24
Source File: dual_layers.py    From provable-robustness-max-linear-regions with BSD 3-Clause "New" or "Revised" License
def T(self, *xs):
        x = xs[-1]
        if x is None:
            return None
        if xs[-1].dim() == 5:
            n = x.size(0)
            x = unbatch(x)
        out = conv_transpose2d(x, self.layer.weight,
                               stride=self.layer.stride,
                               padding=self.layer.padding)
        if xs[-1].dim() == 5:
            out = batch(out, n)
        return out 
Example #25
Source File: dual_layers.py    From provable-robustness-max-linear-regions with BSD 3-Clause "New" or "Revised" License
def conv_transpose2d(x, *args, **kwargs):
    i = 0
    out = []
    batch_size = 10000
    while i < x.size(0):
        out.append(F.conv_transpose2d(x[i:min(i + batch_size, x.size(0))], *args, **kwargs))
        i += batch_size
    return torch.cat(out, 0) 
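This module-level wrapper shadows F.conv_transpose2d within the file so that the very large effective batches produced by unbatch() in T() above are processed at most 10000 rows at a time, trading one huge allocation for several smaller ones. Illustrative usage (shapes invented):

import torch

big = torch.randn(25000, 4, 8, 8)          # larger than one chunk
W = torch.randn(4, 6, 3, 3)
out = conv_transpose2d(big, W, stride=2)   # the chunked wrapper defined above
print(out.shape)                           # torch.Size([25000, 6, 17, 17])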
Example #26
Source File: shared.py    From detectron2 with Apache License 2.0
def onnx_compatibale_interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    # NOTE: The input dimensions are interpreted in the form:
    # `mini-batch x channels x [optional depth] x [optional height] x width`.
    if size is None and scale_factor is not None:
        if input.dim() == 4:
            if isinstance(scale_factor, (int, float)):
                height_scale, width_scale = (scale_factor, scale_factor)
            else:
                assert isinstance(scale_factor, (tuple, list))
                assert len(scale_factor) == 2
                height_scale, width_scale = scale_factor

            assert not align_corners, "No matching C2 op for align_corners == True"
            if mode == "nearest":
                return torch.ops._caffe2.ResizeNearest(
                    input, order="NCHW", width_scale=width_scale, height_scale=height_scale
                )
            elif mode == "bilinear":
                logger.warning(
                    "Use F.conv_transpose2d for bilinear interpolate"
                    " because there's no such C2 op, this may cause significant"
                    " slowdown and the boundary pixels won't be as same as"
                    " using F.interpolate due to padding."
                )
                assert height_scale == width_scale
                return BilinearInterpolation(input, up_scale=height_scale)
        logger.warning("Output size is not static, it might cause ONNX conversion issue")

    return interp(input, size, scale_factor, mode, align_corners) 
Example #27
Source File: shared.py    From detectron2 with Apache License 2.0
def BilinearInterpolation(tensor_in, up_scale):
    assert up_scale % 2 == 0, "Scale should be even"

    def upsample_filt(size):
        factor = (size + 1) // 2
        if size % 2 == 1:
            center = factor - 1
        else:
            center = factor - 0.5

        og = np.ogrid[:size, :size]
        return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)

    kernel_size = int(up_scale) * 2
    bil_filt = upsample_filt(kernel_size)

    dim = int(tensor_in.shape[1])
    kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
    kernel[range(dim), range(dim), :, :] = bil_filt

    tensor_out = F.conv_transpose2d(
        tensor_in,
        weight=to_device(torch.Tensor(kernel), tensor_in.device),
        bias=None,
        stride=int(up_scale),
        padding=int(up_scale / 2),
    )

    return tensor_out


# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
# using dynamic `scale_factor` rather than static `size`. (T43166860)
# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly. 
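Illustrative usage of the helper above, assuming to_device and the rest of the file are in scope. As the warning in the previous example notes, the result matches bilinear F.interpolate in the interior but can differ at the borders:

import torch

x = torch.randn(1, 3, 7, 7)
y = BilinearInterpolation(x, up_scale=2)   # up_scale must be even
print(y.shape)                             # torch.Size([1, 3, 14, 14])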
Example #28
Source File: CustomLayers.py    From BMSG-GAN with MIT License
def forward(self, x):
        """
        forward pass of the layer
        :param x: input
        :return: y => output
        """
        from torch.nn.functional import conv_transpose2d

        return conv_transpose2d(input=x,
                                weight=self.weight * self.scale,  # scale the weight on runtime
                                bias=self.bias if self.use_bias else None,
                                stride=self.stride,
                                padding=self.pad) 
Example #29
Source File: adf.py    From uncertainty_estimation_deep_learning with MIT License
def forward(self, inputs_mean, inputs_variance, output_size=None):
        output_padding = self._output_padding(inputs_mean, output_size, self.stride, self.padding, self.kernel_size)
        outputs_mean = F.conv_transpose2d(
            inputs_mean, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
        outputs_variance = F.conv_transpose2d(
            inputs_variance, self.weight ** 2, None, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return outputs_mean, outputs_variance 
Example #30
Source File: fcn8s.py    From MADAN with MIT License
def forward(self, x):
		return F.conv_transpose2d(x, Variable(self.w), stride=self.factor)
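Here self.w is typically the fixed, non-learned bilinear kernel from the FCN paper, built like upsample_filt in Example #27 (Variable() is a legacy pre-0.4 wrapper and a no-op on modern PyTorch). A sketch of how such a weight is commonly constructed; the helper name and shapes are mine:

import torch
import torch.nn.functional as F

def bilinear_weight(channels, factor):
    # Fixed bilinear kernel, one per channel, zeros off the channel diagonal;
    # layout (in, out, kH, kW) as conv_transpose2d expects.
    size = 2 * factor - factor % 2
    center = (size - 1) / 2 if size % 2 == 1 else size / 2 - 0.5
    og = torch.arange(size, dtype=torch.float32)
    filt = 1 - (og - center).abs() / factor
    w = torch.zeros(channels, channels, size, size)
    for c in range(channels):
        w[c, c] = filt[:, None] * filt[None, :]
    return w

w = bilinear_weight(channels=21, factor=8)   # e.g. 21 VOC classes, 8x upsampling
x = torch.randn(1, 21, 16, 16)
print(F.conv_transpose2d(x, w, stride=8).shape)   # torch.Size([1, 21, 136, 136])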