Python torch.nn.modules.utils._triple() Examples

The following are 10 code examples of torch.nn.modules.utils._triple(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.modules.utils, or try the search function.
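A minimal sketch of the helper itself (assuming a standard PyTorch install; _triple is an internal utility built from _ntuple(3)): a single int is expanded to a 3-tuple, while a length-3 iterable is passed through as a tuple.

from torch.nn.modules.utils import _triple

print(_triple(3))          # (3, 3, 3)  -- a single int is repeated along all three dims
print(_triple((3, 5, 5)))  # (3, 5, 5)  -- an existing 3-element iterable is returned as a tuple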
Example #1
Source File: multiview.py    From pretorched-x with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride,
                         padding=padding, dilation=dilation, groups=groups, bias=bias)

        # If ints are given, convert them to an iterable, 1 -> [1, 1, 1].
        padding = _triple(padding)
        kernel_size = _triple(kernel_size)
        self.stride = _triple(stride)
        self.dilation = _triple(dilation)
        self.channel_shape = (out_channels, in_channels // groups)

        self.paddings = [
            (0, padding[1], padding[2]),
            (padding[0], 0, padding[2]),
            (padding[0], padding[1], 0),
        ]

        self.kernel_sizes = [
            (1, kernel_size[1], kernel_size[2]),
            (kernel_size[0], 1, kernel_size[2]),
            (kernel_size[0], kernel_size[1], 1),
        ]
        self.linear = nn.Linear(3, 1) 
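As an illustration of what the lists above hold (hypothetical values, not from the source), with kernel_size=3 and padding=1 each entry collapses one of the three axes:

from torch.nn.modules.utils import _triple

kernel_size, padding = _triple(3), _triple(1)
kernel_sizes = [(1, kernel_size[1], kernel_size[2]),
                (kernel_size[0], 1, kernel_size[2]),
                (kernel_size[0], kernel_size[1], 1)]
paddings = [(0, padding[1], padding[2]),
            (padding[0], 0, padding[2]),
            (padding[0], padding[1], 0)]
print(kernel_sizes)  # [(1, 3, 3), (3, 1, 3), (3, 3, 1)]
print(paddings)      # [(0, 1, 1), (1, 0, 1), (1, 1, 0)]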
Example #2
Source File: basic_layers.py    From DenseMatchingBenchmark with MIT License
def consistent_padding_with_dilation(padding, dilation, dim=2):
    assert dim == 2 or dim == 3, 'Convolution layer only supports 2D and 3D'
    if dim == 2:
        padding = _pair(padding)
        dilation = _pair(dilation)
    else:  # dim == 3
        padding = _triple(padding)
        dilation = _triple(dilation)

    padding = list(padding)
    for d in range(dim):
        padding[d] = dilation[d] if dilation[d] > 1 else padding[d]
    padding = tuple(padding)

    return padding, dilation 
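A short usage sketch of the helper above (example values chosen for illustration): wherever dilation is greater than 1, the padding on that axis is replaced by the dilation value, which for a 3x3(x3) kernel with stride 1 keeps the output size unchanged.

padding, dilation = consistent_padding_with_dilation(padding=1, dilation=2, dim=3)
print(padding, dilation)   # (2, 2, 2) (2, 2, 2)

padding, dilation = consistent_padding_with_dilation(padding=(1, 1, 1), dilation=(1, 2, 2), dim=3)
print(padding, dilation)   # (1, 2, 2) (1, 2, 2)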
Example #3
Source File: STVEN.py    From STVEN_rPPGNet with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from the paper, section 3.5
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # the spatial conv is effectively a 2D conv due to the 
        # spatial_kernel_size, followed by batch_norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                    stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()  # alternatively nn.Tanh() or nn.ReLU(inplace=True)


        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identically to a standard Conv3D, so it can be reused easily in any
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
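As a quick numeric check of the intermediary-channel formula above (example values, not taken from the source): a 3x3x3 kernel with 64 input and 64 output channels gives 144 intermediate channels.

import math

kernel_size, in_channels, out_channels = (3, 3, 3), 64, 64
intermed_channels = int(math.floor(
    (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
    (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))
print(intermed_channels)  # 144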
Example #4
Source File: r2plus1d.py    From pretorched-x with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()

        # If ints are given, convert them to an iterable, 1 -> [1, 1, 1].
        stride = _triple(stride)
        padding = _triple(padding)
        kernel_size = _triple(kernel_size)

        # Decompose the parameters into spatial and temporal components
        # by masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid aberrant
        # behavior such as padding being added twice.
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]

        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]
        temporal_kernel_size = [kernel_size[0], 1, 1]

        # Compute the number of intermediary channels (M) using formula
        # from the paper section 3.5:
        intermed_channels = int(math.floor((kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
                                           (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # The spatial conv is effectively a 2D conv due to the
        # spatial_kernel_size, followed by batch_norm and ReLU.
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                      stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()

        # The temporal conv is effectively a 1D conv, but has batch norm
        # and ReLU added inside the model constructor, not here. This is an
        # intentional design choice, to allow this module to externally act
        # identically to a standard Conv3D, so it can be reused easily in any other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size,
                                       stride=temporal_stride, padding=temporal_padding, bias=bias) 
Example #5
Source File: module.py    From R2Plus1D-PyTorch with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from the paper, section 3.5
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # the spatial conv is effectively a 2D conv due to the 
        # spatial_kernel_size, followed by batch_norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                    stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()

        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identically to a standard Conv3D, so it can be reused easily in any
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
Example #6
Source File: BayesianLayers.py    From Tutorial_BayesianCompressionForDL with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
                 cuda=False, init_weight=None, init_bias=None, clip_var=None):
        kernel_size = utils._triple(kernel_size)
        stride = utils._triple(stride)
        padding = utils._triple(padding)
        dilation = utils._triple(dilation)

        super(Conv3dGroupNJ, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, utils._pair(0), groups, bias, init_weight, init_bias, cuda, clip_var) 
Example #7
Source File: R3D_model.py    From pytorch-video-recognition with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)


        self.temporal_spatial_conv = nn.Conv3d(in_channels, out_channels, kernel_size,
                                    stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU() 
Example #8
Source File: traj_conv.py    From mmaction with Apache License 2.0
def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 num_deformable_groups=1,
                 im2col_step=64,
                 bias=True):
        super(TrajConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _triple(kernel_size)
        self.stride = _triple(stride)
        self.padding = _triple(padding)
        self.dilation = _triple(dilation)
        self.num_deformable_groups = num_deformable_groups
        self.im2col_step = im2col_step

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(
                torch.Tensor(out_channels,))
        else:
            self.bias = nn.Parameter(
                torch.zeros(0,))

        self.reset_parameters() 
Example #9
Source File: rPPGNet.py    From STVEN_rPPGNet with MIT License
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from the paper, section 3.5
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))
        
        # self-definition
        #intermed_channels = int((in_channels+intermed_channels)/2)

        # the spatial conv is effectively a 2D conv due to the 
        # spatial_kernel_size, followed by batch_norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                    stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()  # alternatively nn.Tanh() or nn.ReLU(inplace=True)


        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identically to a standard Conv3D, so it can be reused easily in any
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
Example #10
Source File: traj_conv.py    From mmaction with Apache License 2.0
def forward(ctx,
                input,
                offset,
                weight,
                bias,
                stride=1,
                padding=0,
                dilation=1,
                deformable_groups=1,
                im2col_step=64):
        if input is not None and input.dim() != 5:
            raise ValueError(
                "Expected 5D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        ctx.stride = _triple(stride)
        ctx.padding = _triple(padding)
        ctx.dilation = _triple(dilation)
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight, bias)

        output = input.new(*TrajConvFunction._output_size(
            input, weight, ctx.padding, ctx.dilation, ctx.stride))

        ctx.bufs_ = [input.new(), input.new()]  # columns, ones

        if not input.is_cuda:
            raise NotImplementedError
        else:
            if isinstance(input, torch.autograd.Variable):
                if not (isinstance(input.data, torch.cuda.FloatTensor) or isinstance(input.data, torch.cuda.DoubleTensor)):
                    raise NotImplementedError
            else:
                if not (isinstance(input, torch.cuda.FloatTensor) or isinstance(input, torch.cuda.DoubleTensor)):
                    raise NotImplementedError

            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            traj_conv_cuda.deform_3d_conv_forward_cuda(
                input, weight, bias, offset, output, ctx.bufs_[0], ctx.bufs_[1],
                weight.size(2), weight.size(3), weight.size(4),
                ctx.stride[0], ctx.stride[1], ctx.stride[2],
                ctx.padding[0], ctx.padding[1], ctx.padding[2],
                ctx.dilation[0], ctx.dilation[1], ctx.dilation[2], ctx.deformable_groups, cur_im2col_step)
        return output