Python torch.utils.data.dataloader.numpy_type_map() Examples

The following are 11 code examples of torch.utils.data.dataloader.numpy_type_map(). Note that numpy_type_map is a module-level dictionary rather than a callable, and it is only present in older PyTorch releases; newer versions have removed it. The project and source file each example was taken from are noted above it. You may also want to check out all available functions/classes of the module torch.utils.data.dataloader.
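For context, here is a minimal sketch of the lookup that all of the examples below perform on numpy scalars, assuming a PyTorch version from the 0.4/1.x era where numpy_type_map still exists:

import numpy as np
from torch.utils.data.dataloader import numpy_type_map  # older PyTorch only

# The dictionary looks roughly like:
#   {'float64': torch.DoubleTensor, 'float32': torch.FloatTensor,
#    'int64': torch.LongTensor, 'int32': torch.IntTensor, ...}
scalars = [np.float32(0.5), np.float32(1.5)]  # a batch of numpy scalars
constructor = numpy_type_map[scalars[0].dtype.name]  # 'float32' -> torch.FloatTensor
print(constructor([float(s) for s in scalars]))  # float32 tensor with values [0.5, 1.5]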
Example #1
Source File: scatter_gather.py    From Detectron.pytorch with MIT License
# Imports reconstructed so the snippet is self-contained; torch._six and
# numpy_type_map exist in the PyTorch 0.4/1.x era this code targets.
# (collections.Sequence/Mapping later moved to collections.abc and were
# removed from the collections top level in Python 3.10.)
import collections
import re

import numpy as np
import torch
from torch._six import int_classes, string_classes
from torch.autograd import Variable
from torch.nn.parallel._functions import Gather
from torch.utils.data.dataloader import numpy_type_map


def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
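A hypothetical call that exercises the numpy_type_map branch of gather above (the values are illustrative, and the Variable wrapper reflects the pre-0.4 PyTorch API this code targets):

import numpy as np

# Per-GPU outputs that happen to be numpy scalars, e.g. two partial losses:
outputs = [np.float32(0.25), np.float32(0.75)]
result = gather(outputs, target_device=-1)  # -1 gathers onto the CPU
# gather_map takes the numpy branch: elem.shape == () and dtype name 'float32',
# so it returns Variable(numpy_type_map['float32']([0.25, 0.75]))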
Example #2
Source File: scatter_gather.py    From FPN-Pytorch with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #3
Source File: utils.py    From novelty-detection with MIT License
# Imports reconstructed so the snippet is self-contained; torch._six and
# numpy_type_map exist in the PyTorch 0.4/1.x era this code targets.
import collections
import re

import torch
from torch._six import int_classes, string_classes
from torch.utils.data.dataloader import numpy_type_map

# Module-level flag in the original file; True only inside DataLoader workers.
_use_shared_memory = False


def concat_collate(batch):
    # type: (List[torch.Tensor]) -> torch.Tensor
    """
    Puts each data field into a tensor, concatenating along the first dimension.
    This is different from the default PyTorch collate, which stacks samples
    rather than concatenating them.

    :param batch: the input batch to be collated.
    """
    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.cat(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.cat([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: concat_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [concat_collate(samples) for samples in transposed]

    raise TypeError((error_msg.format(type(batch[0])))) 
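As a usage sketch (the dataset below is hypothetical), concat_collate can be passed to a DataLoader via collate_fn; samples whose first dimensions differ are then concatenated rather than stacked:

import torch
from torch.utils.data import DataLoader, Dataset

class VariableLengthDataset(Dataset):  # hypothetical dataset
    def __len__(self):
        return 8

    def __getitem__(self, i):
        return torch.randn(i + 1, 3)  # the i-th sample has i + 1 rows

loader = DataLoader(VariableLengthDataset(), batch_size=4, collate_fn=concat_collate)
print(next(iter(loader)).shape)  # torch.Size([10, 3]): 1 + 2 + 3 + 4 rows concatenated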
Example #4
Source File: scatter_gather.py    From pcl.pytorch with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #5
Source File: scatter_gather.py    From Context-aware-ZSR with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #6
Source File: scatter_gather.py    From PANet with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #7
Source File: scatter_gather.py    From PMFNet with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #8
Source File: scatter_gather.py    From Large-Scale-VRD.pytorch with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #9
Source File: scatter_gather.py    From detectron-self-train with MIT License
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #10
Source File: scatter_gather.py    From DIoU-pytorch-detectron with GNU General Public License v3.0
def gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    error_msg = "outputs must contain tensors, numbers, dicts or lists; found {}"

    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, collections.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            elem = out
            if elem_type.__name__ == 'ndarray':
                # array of string classes and object
                if re.search('[SaUO]', elem.dtype.str) is not None:
                    raise TypeError(error_msg.format(elem.dtype))

                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if elem.shape == ():  # scalars
                py_type = float if elem.dtype.name.startswith('float') else int
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs

        raise TypeError((error_msg.format(elem_type)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Example #11
Source File: utils.py    From CenterNet-CondInst with MIT License
# Imports reconstructed so the snippet is self-contained; torch._six and
# numpy_type_map exist in the PyTorch 0.4/1.x era this code targets.
import collections
import re

import torch
from torch._six import int_classes, string_classes
from torch.utils.data.dataloader import numpy_type_map

# Module-level flag in the original file; True only inside DataLoader workers.
_use_shared_memory = False


def collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""

    error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if re.search('[SaUO]', elem.dtype.str) is not None:
                raise TypeError(error_msg.format(elem.dtype))

            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int_classes):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        res = {key: collate([d[key] for d in batch]) for key in batch[0] if key != 'instance_mask'}
        if 'instance_mask' in batch[0]:
            # Zero-pad every instance mask to the largest object count in the batch.
            max_obj = max([d['instance_mask'].shape[0] for d in batch])
            instance_mask = torch.zeros(len(batch), max_obj, *(batch[0]['instance_mask'].shape[1:]))
            for i in range(len(batch)):
                num_obj = batch[i]['instance_mask'].shape[0]
                instance_mask[i, :num_obj] = torch.from_numpy(batch[i]['instance_mask'])
            res.update({'instance_mask': instance_mask})
        return res
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [collate(samples) for samples in transposed]

    raise TypeError((error_msg.format(type(batch[0]))))
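A sketch of the special-cased instance_mask handling above (keys and shapes are illustrative): masks with different object counts are zero-padded to the largest count in the batch, while every other field goes through the usual stacking:

import numpy as np
import torch

batch = [
    {'image': torch.randn(3, 32, 32), 'instance_mask': np.ones((2, 32, 32), dtype=np.float32)},
    {'image': torch.randn(3, 32, 32), 'instance_mask': np.ones((5, 32, 32), dtype=np.float32)},
]
out = collate(batch)
print(out['image'].shape)          # torch.Size([2, 3, 32, 32])
print(out['instance_mask'].shape)  # torch.Size([2, 5, 32, 32]); the first mask is zero-padded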