Python torch.ShortTensor() Examples
The following are 10 code examples of torch.ShortTensor(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the torch module.
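torch.ShortTensor is the legacy CPU constructor for 16-bit signed integer (int16) tensors; torch.cuda.ShortTensor is its GPU counterpart. Before the project examples, a minimal standalone sketch (the values are illustrative):

import torch

x = torch.ShortTensor(2, 3)                 # uninitialized 2x3 int16 tensor
y = torch.ShortTensor([1, 2, 3])            # int16 tensor from a Python list
print(y.dtype)                              # torch.int16
print(y.type())                             # 'torch.ShortTensor'

# Casting an existing tensor is equivalent; float values truncate toward zero.
z = torch.tensor([1.5, 2.7]).type(torch.ShortTensor)
print(z)                                    # tensor([1, 2], dtype=torch.int16)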
Example #1
Source File: torchloader.py From mxbox with BSD 3-Clause "New" or "Revised" License
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    global _use_shared_memory
    _use_shared_memory = True

    # torch.set_num_threads(1)
    while True:
        r = index_queue.get()
        if r is None:
            data_queue.put(None)
            break
        idx, batch_indices = r
        try:
            samples = collate_fn([dataset[i] for i in batch_indices])
        except Exception:
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))

# numpy_type_map = {
#     'float64': torch.DoubleTensor,
#     'float32': torch.FloatTensor,
#     'float16': torch.HalfTensor,
#     'int64': torch.LongTensor,
#     'int32': torch.IntTensor,
#     'int16': torch.ShortTensor,
#     'int8': torch.CharTensor,
#     'uint8': torch.ByteTensor,
# }
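The commented-out numpy_type_map mirrors the table that PyTorch's stock collate function used to map numpy dtype names to tensor constructors, with 'int16' resolving to torch.ShortTensor. A minimal standalone sketch of that lookup (not part of mxbox):

import numpy as np
import torch

numpy_type_map = {'int16': torch.ShortTensor, 'int32': torch.IntTensor,
                  'float32': torch.FloatTensor}

arr = np.array([1, 2, 3], dtype=np.int16)
tensor = numpy_type_map[arr.dtype.name](arr.tolist())
print(tensor.type())                        # 'torch.ShortTensor'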
Example #2
Source File: checkpoint.py From translate with BSD 3-Clause "New" or "Revised" License
def is_integer_tensor(tensor: torch.Tensor) -> bool:
    return (
        isinstance(tensor, torch.ByteTensor)
        or isinstance(tensor, torch.CharTensor)
        or isinstance(tensor, torch.ShortTensor)
        or isinstance(tensor, torch.IntTensor)
        or isinstance(tensor, torch.LongTensor)
    )
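A quick usage sketch (the calls are illustrative, not taken from the translate project). On CPU tensors, isinstance works against the legacy tensor types, so int16 tensors are reported as integer tensors while float tensors are not:

import torch

print(is_integer_tensor(torch.ShortTensor([1, 2])))   # True  (int16)
print(is_integer_tensor(torch.FloatTensor([1.0])))    # False (float32)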
Example #3
Source File: test_torch.py From training_results_v0.6 with Apache License 2.0
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor,
              torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor,
                   torch.cuda.ShortTensor, torch.cuda.IntTensor,
                   torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        gathered = hvd.allgather(tensor)

        assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)

        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            assert rank_tensor.data.min() == i, \
                'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, \
                'hvd.allgather produces incorrect gathered tensor'
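Outside the test harness, the pattern this test exercises reduces to a few lines. A sketch, assuming Horovod is installed and the script is launched with horovodrun so that several workers participate:

import horovod.torch as hvd
import torch

hvd.init()
# Each worker contributes four int16 values tagged with its rank.
local = torch.ShortTensor([hvd.rank()] * 4)
gathered = hvd.allgather(local)             # shape: [4 * hvd.size()]
print(hvd.rank(), gathered)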
Example #4
Source File: test_torch.py From training_results_v0.6 with Apache License 2.0
def test_horovod_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor,
              torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor,
                   torch.cuda.ShortTensor, torch.cuda.IntTensor,
                   torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        gathered = hvd.allgather(tensor)

        expected_size = sum(tensor_sizes)
        assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)

        for i in range(size):
            rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
            rank_tensor = gathered[sum(
                tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
            assert list(rank_tensor.shape) == rank_size
            assert rank_tensor.data.min() == i
            assert rank_tensor.data.max() == i
Example #5
Source File: test_torch.py From training_results_v0.6 with Apache License 2.0
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor,
              torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor,
                   torch.cuda.ShortTensor, torch.cuda.IntTensor,
                   torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = tensor.type(dtype)
        root_tensor = root_tensor.type(dtype)
        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        if rank != root_rank:
            assert (tensor == root_tensor).max() == 0, \
                'hvd.broadcast modifies source tensor'
        assert (broadcasted_tensor.data == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
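The broadcast counterpart in plain form. A sketch under the same assumptions (Horovod installed, multiple workers launched via horovodrun); hvd.broadcast is out-of-place, while the in-place hvd.broadcast_ variant is exercised in the next example:

import horovod.torch as hvd
import torch

hvd.init()
t = torch.ShortTensor([hvd.rank()] * 3)
result = hvd.broadcast(t, root_rank=0)      # every worker receives rank 0's values
print(hvd.rank(), result)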
Example #6
Source File: test_torch.py From training_results_v0.6 with Apache License 2.0
def test_horovod_broadcast_inplace(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()

    # This test does not apply if there is only one worker.
    if size == 1:
        return

    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor,
              torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor,
                   torch.cuda.ShortTensor, torch.cuda.IntTensor,
                   torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = tensor.type(dtype)
        root_tensor = root_tensor.type(dtype)
        broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
        assert (tensor == broadcasted_tensor).min() == 1, \
            'hvd.broadcast does not modify source tensor'
        assert (broadcasted_tensor == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
Example #7
Source File: test_vae.py From vae-anomaly-detector with MIT License
def _get_syntetic_target(self, shape):
    """Random Bernoulli target with success probabilities drawn from U(0, 1)."""
    p = torch.empty(shape).uniform_(0, 1)
    # return Variable(torch.bernoulli(p).type(torch.LongTensor))
    # return Variable(torch.bernoulli(p).type(torch.ShortTensor))
    return Variable(torch.bernoulli(p)).to(self._device)
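The commented-out lines show the author experimenting with casting the Bernoulli draw to an integer type instead of leaving it as float. A standalone sketch of the ShortTensor variant (names are illustrative):

import torch

p = torch.empty(4).uniform_(0, 1)           # per-element success probabilities
target = torch.bernoulli(p).type(torch.ShortTensor)
print(target.dtype)                         # torch.int16, values in {0, 1}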
Example #8
Source File: datasets.py From UnsupervisedGeometryAwareRepresentationLearning with GNU General Public License v3.0
def default_collate_with_string(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    _use_shared_memory = False
    numpy_type_map = {
        'float64': torch.DoubleTensor,
        'float32': torch.FloatTensor,
        'float16': torch.HalfTensor,
        'int64': torch.LongTensor,
        'int32': torch.IntTensor,
        'int16': torch.ShortTensor,
        'int8': torch.CharTensor,
        'uint8': torch.ByteTensor,
    }
    string_classes = (str, bytes)
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            if elem.dtype.kind in {'U', 'S'}:
                return np.stack(batch, 0)
            else:
                return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.FloatTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate_with_string([d[key] for d in batch])
                for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate_with_string(samples) for samples in transposed]
    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))
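When a batch consists of zero-dimensional int16 numpy scalars, the function falls through to the numpy_type_map lookup and returns a torch.ShortTensor. A usage sketch (assuming the imports the project already has):

import numpy as np

batch = [np.int16(1), np.int16(2), np.int16(3)]
collated = default_collate_with_string(batch)
print(collated.type())                      # 'torch.ShortTensor'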
Example #9
Source File: test_torch.py From training_results_v0.6 with Apache License 2.0
def test_horovod_allgather_grad(self):
    """Test the correctness of the allgather gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor,
              torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor,
                   torch.cuda.ShortTensor, torch.cuda.IntTensor,
                   torch.cuda.LongTensor, torch.cuda.FloatTensor,
                   torch.cuda.DoubleTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break

        tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
        tensor_sizes = tensor_sizes[:size]

        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = tensor.type(dtype)
        tensor = torch.autograd.Variable(tensor, requires_grad=True)

        grad_list = []
        for r, size in enumerate(tensor_sizes):
            grad_list.append(torch.ones([size] + [17] * (dim - 1)) * r)
        grad_ys = torch.cat(grad_list, dim=0)

        gathered = hvd.allgather(tensor)
        gathered.backward(grad_ys)
        grad_out = tensor.grad.data.numpy()

        expected = np.ones(
            [tensor_sizes[rank]] + [17] * (dim - 1)
        ) * rank * size
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
Example #10
Source File: datasets.py From NeuralSceneDecomposition with GNU General Public License v3.0
def default_collate_with_string(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    _use_shared_memory = False
    numpy_type_map = {
        'float64': torch.DoubleTensor,
        'float32': torch.FloatTensor,
        'float16': torch.HalfTensor,
        'int64': torch.LongTensor,
        'int32': torch.IntTensor,
        'int16': torch.ShortTensor,
        'int8': torch.CharTensor,
        'uint8': torch.ByteTensor,
    }
    string_classes = (str, bytes)
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            if elem.dtype.kind in {'U', 'S'}:
                return np.stack(batch, 0)
            else:
                return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.FloatTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate_with_string([d[key] for d in batch])
                for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate_with_string(samples) for samples in transposed]
    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))