Python torch.ceil() Examples

The following are 27 code examples of torch.ceil(), drawn from open-source projects. The source file, project, and license are noted above each example.
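Before the project examples, a minimal sketch of the semantics (values illustrative only): torch.ceil() returns, element-wise, the smallest integer greater than or equal to the input. A pattern that recurs throughout the examples below is computing how many fixed-size frames or steps are needed to cover a sequence.

import torch

x = torch.tensor([-1.5, -0.2, 0.0, 0.2, 1.5])
print(torch.ceil(x))  # tensor([-1., -0.,  0.,  1.,  2.])

# recurring pattern: number of hops/frames needed to cover each length
lengths = torch.tensor([3., 5., 4.])
print(torch.ceil(lengths / 2).long())  # tensor([2, 3, 2])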
Example #1
Source File: solvers.py    From occupancy_flow with MIT License
def _grid_constructor_from_step_size(self, step_size):

        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[-1]

            niters = torch.ceil((end_time - start_time) / step_size + 1).item()
            t_infer = torch.arange(0, niters).to(t) * step_size + start_time
            '''
            if t_infer[-1] > t[-1]:
                t_infer[-1] = t[-1]
            '''
            if t_infer[-1] != t[-1]:
                t_infer[-1] = t[-1]

            return t_infer

        return _grid_constructor 
Example #2
Source File: train.py    From pytorch-asr with GNU General Public License v3.0
def validate(self, data_loader):
        "validate with label error rate by the edit distance between hyps and refs"
        self.model.eval()
        with torch.no_grad():
            N, D = 0, 0
            t = tqdm(enumerate(data_loader), total=len(data_loader), desc="validating")
            for i, (data) in t:
                xs, ys, frame_lens, label_lens, filenames, texts = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                # convert likes to ctc labels
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                hyps = [onehot2int(yh[:s]).squeeze() for yh, s in zip(ys_hat, frame_lens)]
                hyps = [remove_duplicates(h, blank=0) for h in hyps]
                # slice the targets
                pos = torch.cat((torch.zeros((1, ), dtype=torch.long), torch.cumsum(label_lens, dim=0)))
                refs = [ys[s:l] for s, l in zip(pos[:-1], pos[1:])]
                # calculate ler
                N += self.edit_distance(refs, hyps)
                D += sum(len(r) for r in refs)
                ler = N * 100. / D
                t.set_description(f"validating (LER: {ler:.2f} %)")
                t.refresh()
            logger.info(f"validating at epoch {self.epoch:03d}: LER {ler:.2f} %") 
Example #3
Source File: predict.py    From pytorch-asr with GNU General Public License v3.0
def decode(self, data_loader):
        self.model.eval()
        with torch.no_grad():
            for i, (data) in enumerate(data_loader):
                # predict phones using AM
                xs, frame_lens, filenames = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # decode using Kaldi's latgen decoder
                # no need to normalize posteriors with state priors when we use CTC
                # https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43908.pdf
                loglikes = torch.log(ys_hat)
                if self.use_cuda:
                    loglikes = loglikes.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(loglikes, frame_lens)
                # print results
                loglikes = [l[:s] for l, s in zip(loglikes, frame_lens)]
                words = [w[:s] for w, s in zip(words, w_sizes)]
                for results in zip(filenames, loglikes, words):
                    self.print_result(*results) 
Example #4
Source File: predictor.py    From pytorch-asr with GNU General Public License v3.0
def decode(self, data_loader):
        self.model.eval()
        with torch.no_grad():
            for xs, frame_lens, filenames in data_loader:
                # predict phones using AM
                if self.use_cuda:
                    xs = xs.cuda(non_blocking=True)
                ys_hat, frame_lens = self.model(xs, frame_lens)
                #frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # decode using Kaldi's latgen decoder
                # no need to normalize posteriors with state priors when we use CTC
                # https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43908.pdf
                if self.use_cuda:
                    ys_hat = ys_hat.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(ys_hat, frame_lens)
                # print results
                ys_hat = [y[:s] for y, s in zip(ys_hat, frame_lens)]
                words = [w[:s] for w, s in zip(words, w_sizes)]
                for results in zip(filenames, ys_hat, words):
                    self.print_result(*results) 
Example #5
Source File: networks.py    From graphx-conv with MIT License
def _project(self, img_feats, xs, ys):
        x, y = xs.flatten(), ys.flatten()
        idb = T.arange(img_feats.shape[0], device=img_feats.device)
        idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()

        x1, y1 = T.floor(x), T.floor(y)
        x2, y2 = T.ceil(x), T.ceil(y)
        q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)
        q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)
        q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)
        q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)

        weights = ((x2 - x) * (y2 - y)).unsqueeze(1)
        q11 *= weights

        weights = ((x - x1) * (y2 - y)).unsqueeze(1)
        q21 *= weights

        weights = ((x2 - x) * (y - y1)).unsqueeze(1)
        q12 *= weights

        weights = ((x - x1) * (y - y1)).unsqueeze(1)
        q22 *= weights
        out = q11 + q12 + q21 + q22
        return out.view(img_feats.shape[0], -1, img_feats.shape[1]) 
Example #6
Source File: networks.py    From graphx-conv with MIT License
def _project_old(self, img_feats, xs, ys):
        out = []
        for i in range(list(img_feats.shape)[0]):
            x, y, img_feat = xs[i], ys[i], img_feats[i]
            x1, y1 = T.floor(x), T.floor(y)
            x2, y2 = T.ceil(x), T.ceil(y)
            q11 = img_feat[..., x1.long(), y1.long()].cuda()
            q12 = img_feat[..., x1.long(), y2.long()].cuda()
            q21 = img_feat[..., x2.long(), y1.long()].cuda()
            q22 = img_feat[..., x2.long(), y2.long()].cuda()

            weights = ((x2 - x) * (y2 - y)).unsqueeze(0)
            q11 *= weights

            weights = ((x - x1) * (y2 - y)).unsqueeze(0)
            q21 *= weights

            weights = ((x2 - x) * (y - y1)).unsqueeze(0)
            q12 *= weights

            weights = ((x - x1) * (y - y1)).unsqueeze(0)
            q22 *= weights
            out.append(q11 + q12 + q21 + q22)
        return T.stack(out).transpose(2, 1) 
Example #7
Source File: layers.py    From nonparaSeq2seqVC_code with MIT License
def forward(self, x, input_lengths):
        '''
        x  [batch_size, mel_bins, T]

        return [batch_size, T, channels]
        '''
        x = x.transpose(1, 2)

        x_sorted, sorted_lengths, initial_index = sort_batch(x, input_lengths)

        x_packed = nn.utils.rnn.pack_padded_sequence(
            x_sorted, sorted_lengths.cpu().numpy(), batch_first=True)

        self.lstm1.flatten_parameters()
        outputs, _ = self.lstm1(x_packed)

        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True, total_length=x.size(1))  # total_length keeps the recovered sequence length unchanged

        outputs = outputs.reshape(x.size(0), -1, self.concat_hidden_dim)

        output_lengths = torch.ceil(sorted_lengths.float() / self.n_frames_per_step).long()
        outputs = nn.utils.rnn.pack_padded_sequence(
            outputs, output_lengths.cpu().numpy(), batch_first=True)

        self.lstm2.flatten_parameters()
        outputs, _ = self.lstm2(outputs)

        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)

        return outputs[initial_index], output_lengths[initial_index] 
Example #8
Source File: TherEncoding.py    From DeepRobust with MIT License
def one_hot(x, levels):
    """
    Output: One hot Encoding of the input.
    """

    batch_size, channel, H, W = x.size()
    x = x.unsqueeze_(4)
    x = torch.ceil(x * (levels - 1)).long()
    onehot = torch.zeros(batch_size, channel, H, W, levels).float().to('cuda').scatter_(4, x, 1)
    #print(onehot)

    return onehot 
Example #9
Source File: utils.py    From nonparaSeq2seqVC_code with MIT License
def test_mask():
    lengths = torch.IntTensor([3,5,4])
    print(torch.ceil(lengths.float() / 2))

    data = torch.FloatTensor(3, 5, 2) # [B, T, D]
    data.fill_(1.)
    m = get_mask_from_lengths(lengths.cuda(), data.size(1))
    print(m)
    m = m.unsqueeze(2).expand(-1, -1, data.size(2)).float()
    print(m)

    print(torch.sum(data.cuda() * m) / torch.sum(m)) 
Example #10
Source File: aev.py    From torchani with MIT License
def compute_shifts(cell: Tensor, pbc: Tensor, cutoff: float) -> Tensor:
    """Compute the shifts of unit cell along the given cell vectors to make it
    large enough to contain all pairs of neighbor atoms with PBC under
    consideration

    Arguments:
        cell (:class:`torch.Tensor`): tensor of shape (3, 3) of the three
            vectors defining unit cell:
            tensor([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]])
        cutoff (float): the cutoff inside which atoms are considered pairs
        pbc (:class:`torch.Tensor`): boolean vector of size 3 storing
            if pbc is enabled for that direction.

    Returns:
        :class:`torch.Tensor`: long tensor of shifts. The center cell and
            symmetric cells are not included.
    """
    reciprocal_cell = cell.inverse().t()
    inv_distances = reciprocal_cell.norm(2, -1)
    num_repeats = torch.ceil(cutoff * inv_distances).to(torch.long)
    num_repeats = torch.where(pbc, num_repeats, num_repeats.new_zeros(()))
    r1 = torch.arange(1, num_repeats[0] + 1, device=cell.device)
    r2 = torch.arange(1, num_repeats[1] + 1, device=cell.device)
    r3 = torch.arange(1, num_repeats[2] + 1, device=cell.device)
    o = torch.zeros(1, dtype=torch.long, device=cell.device)
    return torch.cat([
        torch.cartesian_prod(r1, r2, r3),
        torch.cartesian_prod(r1, r2, o),
        torch.cartesian_prod(r1, r2, -r3),
        torch.cartesian_prod(r1, o, r3),
        torch.cartesian_prod(r1, o, o),
        torch.cartesian_prod(r1, o, -r3),
        torch.cartesian_prod(r1, -r2, r3),
        torch.cartesian_prod(r1, -r2, o),
        torch.cartesian_prod(r1, -r2, -r3),
        torch.cartesian_prod(o, r2, r3),
        torch.cartesian_prod(o, r2, o),
        torch.cartesian_prod(o, r2, -r3),
        torch.cartesian_prod(o, o, r3),
    ]) 
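A hedged usage sketch of compute_shifts (the cubic cell and cutoff below are made-up values): for a 10-unit cubic cell and a 5-unit cutoff, ceil(5 * 0.1) = 1 repeat is needed along each periodic direction, giving the 13 asymmetric shift combinations listed above.

cell = torch.eye(3) * 10.0                # hypothetical cubic unit cell
pbc = torch.tensor([True, True, True])    # periodic in all three directions
shifts = compute_shifts(cell, pbc, cutoff=5.0)
print(shifts.shape)  # torch.Size([13, 3]); mirrored cells follow by symmetry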
Example #11
Source File: model.py    From aerial_wildlife_detection with MIT License
def getOutputSize(self, inputSize):
        if not isinstance(inputSize, torch.Tensor):
            inputSize = torch.tensor(inputSize)
        outputSize = inputSize.clone().float()

        for _ in range(4):
            outputSize = torch.ceil(outputSize / 2.0)

        return outputSize.int() 
Example #12
Source File: features.py    From NeMo with Apache License 2.0
def get_seq_len(self, seq_len):
        return torch.ceil(seq_len / self.hop_length).to(dtype=torch.long) 
Example #13
Source File: audio_preprocessing.py    From NeMo with Apache License 2.0
def get_seq_len(self, length):
        # Called by forward()
        return torch.ceil(length / self.hop_length).to(dtype=torch.long) 
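Both NeMo variants compute the number of STFT frames from the number of audio samples. A small worked example, assuming a hop_length of 160 (a typical 10 ms hop at 16 kHz; the values are illustrative). Note that recent PyTorch performs true division on integer tensors, while older versions truncated, which is why other examples in this collection cast to float first.

seq_len = torch.tensor([16000, 8000])  # lengths in samples
hop_length = 160
print(torch.ceil(seq_len / hop_length).to(dtype=torch.long))  # tensor([100, 50])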
Example #14
Source File: unary.py    From torch2trt with MIT License
def aten_ceil(inputs, attributes, scope):
    inp = inputs[0]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_unary(inp, trt.UnaryOperation.CEIL)
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [_op.ceil(inp)]

    return [torch.ceil(inp)] 
Example #15
Source File: rnn.py    From training with Apache License 2.0
def forward(self, x):
        # T, B, U
        x, x_lens = x
        seq = [x]
        for i in range(1, self.factor):
            tmp = torch.zeros_like(x)
            tmp[:-i, :, :] = x[i:, :, :]
            seq.append(tmp)
        x_lens = torch.ceil(x_lens.float() / self.factor).int()
        return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens 
Example #16
Source File: features.py    From training with Apache License 2.0
def __init__(self, sample_rate=8000, window_size=0.02, window_stride=0.01,
                       n_fft=None,
                       window="hamming", normalize="per_feature", log=True, center=True,
                       dither=constant, pad_to=8, max_duration=16.7,
                       frame_splicing=1):
        super(SpectrogramFeatures, self).__init__()
        torch_windows = {
            'hann': torch.hann_window,
            'hamming': torch.hamming_window,
            'blackman': torch.blackman_window,
            'bartlett': torch.bartlett_window,
            'none': None,
        }
        self.win_length = int(sample_rate * window_size)
        self.hop_length = int(sample_rate * window_stride)
        self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

        window_fn = torch_windows.get(window, None)
        window_tensor = window_fn(self.win_length,
                                  periodic=False) if window_fn else None
        self.window = window_tensor

        self.normalize = normalize
        self.log = log
        self.center = center
        self.dither = dither
        self.pad_to = pad_to
        self.frame_splicing = frame_splicing

        max_length = 1 + math.ceil(
                (max_duration * sample_rate - self.win_length) / self.hop_length
        )
        max_pad = 16 - (max_length % 16)
        self.max_length = max_length + max_pad 
Example #17
Source File: features.py    From training with Apache License 2.0
def get_seq_len(self, seq_len):
        x = torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
            dtype=torch.int)
        if self.frame_splicing > 1:
            x = torch.ceil(x.float() / self.frame_splicing).to(dtype=torch.int)
        return x 
Example #18
Source File: features.py    From training with Apache License 2.0
def get_seq_len(self, seq_len):
        x = torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
            dtype=torch.int)
            # dtype=torch.long)
        if self.frame_splicing > 1:
            x = torch.ceil(x.float() / self.frame_splicing).to(dtype=torch.int)
        return x 
Example #19
Source File: encodings.py    From bindsnet with GNU Affero General Public License v3.0
def rank_order(
    datum: torch.Tensor, time: int, dt: float = 1.0, **kwargs
) -> torch.Tensor:
    # language=rst
    """
    Encodes data via a rank order coding-like representation. One spike per neuron,
    temporally ordered by decreasing intensity. Inputs must be non-negative.

    :param datum: Tensor of shape ``[n_samples, n_1, ..., n_k]``.
    :param time: Length of rank order-encoded spike train per input variable.
    :param dt: Simulation time step.
    :return: Tensor of shape ``[time, n_1, ..., n_k]`` of rank order-encoded spikes.
    """
    assert (datum >= 0).all(), "Inputs must be non-negative"

    shape, size = datum.shape, datum.numel()
    datum = datum.flatten()
    time = int(time / dt)

    # Create spike times in order of decreasing intensity.
    datum /= datum.max()
    times = torch.zeros(size)
    times[datum != 0] = 1 / datum[datum != 0]
    times *= time / times.max()  # Extended through simulation time.
    times = torch.ceil(times).long()

    # Create spike times tensor.
    spikes = torch.zeros(time, size).byte()
    for i in range(size):
        if 0 < times[i] < time:
            spikes[times[i] - 1, i] = 1

    return spikes.reshape(time, *shape) 
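A hedged usage sketch of rank_order (input values are illustrative). Stronger inputs spike earlier; note that after rescaling, the weakest nonzero input lands exactly at t == time and is dropped by the strict inequality.

datum = torch.tensor([0.25, 0.5, 1.0])
spikes = rank_order(datum, time=10)
print(spikes.shape)   # torch.Size([10, 3])
print(spikes.sum(0))  # tensor([0, 1, 1]) -- the 0.25 input falls on the boundary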
Example #20
Source File: operations.py    From NNEF-Tools with Apache License 2.0
def nnef_logarithmic_quantize(x, max, bits):
    # type: (torch.Tensor, torch.Tensor, int)->torch.Tensor

    x, max = _expand_binary(x, max)

    r = float(2 ** bits - 1)
    m = torch.ceil(torch.log2(max))
    q = torch.round(nnef_clamp(torch.log2(torch.abs(x)), m - r, m))
    return torch.sign(x) * torch.pow(2.0, q) 
Example #21
Source File: wage_qtorch.py    From QPyTorch with MIT License
def shift(x):
    max_entry = x.abs().max()
    return x / 2.0 ** torch.ceil(torch.log2(max_entry)) 
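shift() rescales a tensor by the smallest power of two at or above its largest magnitude, which the WAGE quantization code above uses to keep values in range. A small worked check (values illustrative):

x = torch.tensor([0.3, -1.7])
# max |x| = 1.7, ceil(log2(1.7)) = 1, so x is divided by 2**1
print(shift(x))  # tensor([ 0.1500, -0.8500])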
Example #22
Source File: train.py    From pytorch-asr with GNU General Public License v3.0
def test(self, data_loader):
        "test with word error rate by the edit distance between hyps and refs"
        self.model.eval()
        with torch.no_grad():
            N, D = 0, 0
            t = tqdm(enumerate(data_loader), total=len(data_loader), desc="testing")
            for i, (data) in t:
                xs, ys, frame_lens, label_lens, filenames, texts = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # latgen decoding
                loglikes = torch.log(ys_hat)
                if self.use_cuda:
                    loglikes = loglikes.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(loglikes, frame_lens)
                hyps = [w[:s] for w, s in zip(words, w_sizes)]
                # convert target texts to word indices
                w2i = lambda w: self.decoder.wordi[w] if w in self.decoder.wordi else self.decoder.wordi['<unk>']
                refs = [[w2i(w) for w in t.strip().split()] for t in texts]
                # calculate wer
                N += self.edit_distance(refs, hyps)
                D += sum(len(r) for r in refs)
                wer = N * 100. / D
                t.set_description(f"testing (WER: {wer:.2f} %)")
                t.refresh()
            logger.info(f"testing at epoch {self.epoch:03d}: WER {wer:.2f} %") 
Example #23
Source File: solvers.py    From torchdiffeq with MIT License
def _grid_constructor_from_step_size(self, step_size):

        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[-1]

            niters = torch.ceil((end_time - start_time) / step_size + 1).item()
            t_infer = torch.arange(0, niters).to(t) * step_size + start_time
            if t_infer[-1] > t[-1]:
                t_infer[-1] = t[-1]

            return t_infer

        return _grid_constructor 
Example #24
Source File: rounding.py    From heat with MIT License
def ceil(x, out=None):
    """
    Return the ceil of the input, element-wise.

    The ceil of the scalar x is the smallest integer i, such that i >= x. It is often denoted as :math:`\\lceil x \\rceil`.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the ceiled values.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    ceiled : ht.DNDarray
        A tensor of the same shape as x, containing the ceiled values of each element in this tensor. If out was
        provided, ceiled is a reference to it.

    Examples
    --------
    >>> ht.ceil(ht.arange(-2.0, 2.0, 0.4))
    tensor([-2., -1., -1., -0., -0., -0.,  1.,  1.,  2.,  2.])
    """
    return operations.__local_op(torch.ceil, x, out) 
Example #25
Source File: pano_opt.py    From pytorch-layoutnet with MIT License
def map_coordinates(input, coordinates):
    ''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
    input: (H, W)
    coordinates: (2, ...)
    '''
    h = input.shape[0]
    w = input.shape[1]

    def _coordinates_pad_wrap(h, w, coordinates):
        coordinates[0] = coordinates[0] % h
        coordinates[1] = coordinates[1] % w
        return coordinates

    co_floor = torch.floor(coordinates).long()
    co_ceil = torch.ceil(coordinates).long()
    d1 = (coordinates[1] - co_floor[1].float())
    d2 = (coordinates[0] - co_floor[0].float())
    co_floor = _coordinates_pad_wrap(h, w, co_floor)
    co_ceil = _coordinates_pad_wrap(h, w, co_ceil)
    f00 = input[co_floor[0], co_floor[1]]
    f10 = input[co_floor[0], co_ceil[1]]
    f01 = input[co_ceil[0], co_floor[1]]
    f11 = input[co_ceil[0], co_ceil[1]]
    fx1 = f00 + d1 * (f10 - f00)
    fx2 = f01 + d1 * (f11 - f01)
    return fx1 + d2 * (fx2 - fx1) 
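A hedged usage sketch of map_coordinates (the grid and sample points are made up): fractional coordinates are bilinearly interpolated, and out-of-range coordinates wrap around via the modulo padding.

img = torch.arange(12.).view(3, 4)   # a 3x4 "image": rows 0..2, columns 0..3
coords = torch.tensor([[0.5, 2.0],   # row coordinates
                       [0.5, 5.0]])  # column coordinates; 5.0 wraps to 1.0
print(map_coordinates(img, coords))  # tensor([2.5000, 9.0000])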
Example #26
Source File: rnn.py    From inference with Apache License 2.0
def forward(self, x, x_lens):
        # T, B, U
        seq = [x]
        for i in range(1, self.factor):
            # This doesn't seem to make much sense...
            tmp = torch.zeros_like(x)
            tmp[:-i, :, :] = x[i:, :, :]
            seq.append(tmp)
        x_lens = torch.ceil(x_lens.float() / self.factor).int()
        # Gross, this is horrible. What a waste of memory...
        return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens 
Example #27
Source File: torch_utils.py    From pysaliency with MIT License
def gaussian_filter_1d(tensor, dim, sigma, truncate=4, kernel_size=None, padding_mode='replicate', padding_value=0.0):
    sigma = torch.as_tensor(sigma, device=tensor.device, dtype=tensor.dtype)

    if kernel_size is not None:
        kernel_size = torch.as_tensor(kernel_size, device=tensor.device, dtype=torch.int64)
    else:
        kernel_size = torch.as_tensor(2 * torch.ceil(truncate * sigma) + 1, device=tensor.device, dtype=torch.int64)

    kernel_size = kernel_size.detach()

    kernel_size_int = kernel_size.detach().cpu().numpy()

    mean = (torch.as_tensor(kernel_size, dtype=tensor.dtype) - 1) / 2

    grid = torch.arange(kernel_size, device=tensor.device) - mean

    # reshape the grid so that it can be used as a kernel for F.conv1d
    kernel_shape = [1] * len(tensor.shape)
    kernel_shape[dim] = kernel_size_int
    grid = grid.view(kernel_shape)

    grid = grid.detach()

    padding = [0] * (2 * len(tensor.shape))
    padding[dim * 2 + 1] = math.ceil((kernel_size_int - 1) / 2)
    padding[dim * 2] = math.ceil((kernel_size_int - 1) / 2)
    padding = tuple(reversed(padding))

    if padding_mode in ['replicate']:
        # replication padding has some strange constraints...
        assert len(tensor.shape) - dim <= 2
        padding = padding[:(len(tensor.shape) - 2) * 2]

    tensor_ = F.pad(tensor, padding, padding_mode, padding_value)

    # create gaussian kernel from grid using current sigma
    kernel = torch.exp(-0.5 * (grid / sigma) ** 2)
    kernel = kernel / kernel.sum()

    # convolve input with gaussian kernel
    return F.conv1d(tensor_, kernel)
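A hedged usage sketch: since the convolution is done with F.conv1d, the input is expected in (batch, channels, length) layout with dim pointing at the length axis; the signal and sigma are illustrative. The symmetric padding keeps the output the same length as the input.

signal = torch.randn(1, 1, 100)  # (batch, channels, length)
smoothed = gaussian_filter_1d(signal, dim=2, sigma=2.0)
print(smoothed.shape)  # torch.Size([1, 1, 100])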