Python torch.float32 Examples

The following are 30 code examples of torch.float32, the 32-bit (single-precision) floating-point dtype in PyTorch. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
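Note that torch.float32 is not a callable but a dtype object: in the examples below it appears almost exclusively as the dtype argument of tensor constructors or as the target of a cast. The following minimal sketch (not taken from any of the projects listed below) illustrates these common patterns:

import torch
import numpy as np

# Create tensors directly in float32.
x = torch.zeros(3, 4, dtype=torch.float32)
steps = torch.arange(0, 10, step=2, dtype=torch.float32)

# Cast an existing tensor (here built from int16 NumPy data) to float32.
samples = torch.from_numpy(np.array([0, 16384, -32768], dtype=np.int16))
samples = samples.to(torch.float32)

# Inspect the dtype.
assert samples.dtype == torch.float32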
Example #1
Source File: anchor_generator.py    From Res2Net-maskrcnn with MIT License
def grid_anchors(self, grid_sizes):
        anchors = []
        for size, stride, base_anchors in zip(
            grid_sizes, self.strides, self.cell_anchors
        ):
            grid_height, grid_width = size
            device = base_anchors.device
            shifts_x = torch.arange(
                0, grid_width * stride, step=stride, dtype=torch.float32, device=device
            )
            shifts_y = torch.arange(
                0, grid_height * stride, step=stride, dtype=torch.float32, device=device
            )
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            anchors.append(
                (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
            )

        return anchors 
Example #2
Source File: wav_utils.py    From audio with BSD 2-Clause "Simplified" License
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor 
Example #3
Source File: pytorch_ext.py    From L3C-PyTorch with GNU General Public License v3.0
def one_hot(x, L, Ldim):
    """ add dim L at Ldim """
    assert Ldim >= 0 or Ldim == -1, f'Only supporting Ldim >= 0 or Ldim == -1: {Ldim}'
    out_shape = list(x.shape)
    if Ldim == -1:
        out_shape.append(L)
    else:
        out_shape.insert(Ldim, L)
    x = x.unsqueeze(Ldim)  # x must match # dims of outshape
    assert x.dim() == len(out_shape), (x.shape, out_shape)
    oh = torch.zeros(*out_shape, dtype=torch.float32, device=x.device)
    oh.scatter_(Ldim, x, 1)
    return oh


# ------------------------------------------------------------------------------ 
Example #4
Source File: exp_synph.py    From connecting_the_dots with MIT License
def get_test_sets(self):
    test_sets = torchext.TestSets()
    test_set = dataset.TrackSynDataset(self.settings_path, self.test_paths, train=False, data_aug=True, track_length=1)
    test_sets.append('simple', test_set, test_frequency=1)

    # initialize photometric loss modules according to image sizes
    self.losses = []
    for imsize, pat in zip(test_set.imsizes, test_set.patterns):
      pat = pat.mean(axis=2)
      pat = torch.from_numpy(pat[None][None].astype(np.float32))
      pat = pat.to(self.train_device)
      self.lcn_in = self.lcn_in.to(self.train_device)
      pat,_ = self.lcn_in(pat)
      pat = torch.cat([pat for idx in range(3)], dim=1)
      self.losses.append( networks.RectifiedPatternSimilarityLoss(imsize[0],imsize[1], pattern=pat) )

    return test_sets 
Example #5
Source File: utils.py    From integrated-gradient-pytorch with MIT License
def pre_processing(obs, cuda):
    mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
    std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
    obs = obs / 255
    obs = (obs - mean) / std
    obs = np.transpose(obs, (2, 0, 1))
    obs = np.expand_dims(obs, 0)
    obs = np.array(obs)
    if cuda:
        torch_device = torch.device('cuda:0')
    else:
        torch_device = torch.device('cpu')
    obs_tensor = torch.tensor(obs, dtype=torch.float32, device=torch_device, requires_grad=True)
    return obs_tensor

# generate the entire images 
Example #6
Source File: 35_lstm_raw.py    From deep-learning-note with MIT License
def get_params():
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    def _three():
        return (_one((num_inputs, num_hiddens)),
                _one((num_hiddens, num_hiddens)),
                torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))

    W_xi, W_hi, b_i = _three() # input gate parameters
    W_xf, W_hf, b_f = _three() # forget gate parameters
    W_xo, W_ho, b_o = _three() # output gate parameters
    W_xc, W_hc, b_c = _three() # candidate memory cell parameters

    # output layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
    return nn.ParameterList([W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]) 
Example #7
Source File: 30_series_sampling.py    From deep-learning-note with MIT License
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
    # Subtract 1 because the output index is the corresponding input index plus 1
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    # Return the sequence of length num_steps starting at pos
    def _data(pos):
        return corpus_indices[pos: pos + num_steps]
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    for i in range(epoch_size):
        # Read batch_size random examples each time
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = [_data(j * num_steps) for j in batch_indices]
        Y = [_data(j * num_steps + 1) for j in batch_indices]
        yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device) 
Example #8
Source File: keypoint.py    From Parsing-R-CNN with MIT License
def __init__(self, keypoints, size, mode=None):
        # FIXME remove check once we have better integration with device
        # in my version this would consistently return a CPU tensor
        device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
        num_keypoints = keypoints.shape[0]
        if num_keypoints:
            keypoints = keypoints.view(num_keypoints, -1, 3)

        # TODO should I split them?
        # self.visibility = keypoints[..., 2]
        self.keypoints = keypoints  # [..., :2]

        self.size = size
        self.mode = mode
        self.extra_fields = {} 
Example #9
Source File: bounding_box.py    From Parsing-R-CNN with MIT License
def __init__(self, bbox, image_size, mode="xyxy"):
        device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError(
                "bbox should have 2 dimensions, got {}".format(bbox.ndimension())
            )
        if bbox.size(-1) != 4:
            raise ValueError(
                "last dimension of bbox should have a "
                "size of 4, got {}".format(bbox.size(-1))
            )
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")

        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        self.extra_fields = {} 
Example #10
Source File: anchor_generator.py    From Parsing-R-CNN with MIT License
def grid_anchors(self, grid_sizes):
        anchors = []
        for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors):
            grid_height, grid_width = size
            device = base_anchors.device
            shifts_x = torch.arange(
                0, grid_width * stride, step=stride, dtype=torch.float32, device=device
            )
            shifts_y = torch.arange(
                0, grid_height * stride, step=stride, dtype=torch.float32, device=device
            )
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            anchors.append(
                (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
            )

        return anchors 
Example #11
Source File: bounding_box.py    From Res2Net-maskrcnn with MIT License
def __init__(self, bbox, image_size, mode="xyxy"):
        device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
        bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
        if bbox.ndimension() != 2:
            raise ValueError(
                "bbox should have 2 dimensions, got {}".format(bbox.ndimension())
            )
        if bbox.size(-1) != 4:
            raise ValueError(
                "last dimenion of bbox should have a "
                "size of 4, got {}".format(bbox.size(-1))
            )
        if mode not in ("xyxy", "xywh"):
            raise ValueError("mode should be 'xyxy' or 'xywh'")

        self.bbox = bbox
        self.size = image_size  # (image_width, image_height)
        self.mode = mode
        self.extra_fields = {} 
Example #12
Source File: utils.py    From deep-learning-note with MIT License
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
    # Subtract 1 because the output index is the corresponding input index plus 1
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    # Return the sequence of length num_steps starting at pos
    def _data(pos):
        return corpus_indices[pos: pos + num_steps]

    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    for i in range(epoch_size):
        # Read batch_size random examples each time
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = [_data(j * num_steps) for j in batch_indices]
        Y = [_data(j * num_steps + 1) for j in batch_indices]
        yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device) 
Example #13
Source File: utils.py    From pytorch_sac_ae with MIT License
def __init__(self, obs_shape, action_shape, capacity, batch_size, device):
        self.capacity = capacity
        self.batch_size = batch_size
        self.device = device

        # the proprioceptive obs is stored as float32, pixels obs as uint8
        obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8

        self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
        self.rewards = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones = np.empty((capacity, 1), dtype=np.float32)

        self.idx = 0
        self.last_save = 0
        self.full = False 
Example #14
Source File: keypoint.py    From Res2Net-maskrcnn with MIT License
def __init__(self, keypoints, size, mode=None):
        # FIXME remove check once we have better integration with device
        # in my version this would consistently return a CPU tensor
        device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')
        keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
        num_keypoints = keypoints.shape[0]
        if num_keypoints:
            keypoints = keypoints.view(num_keypoints, -1, 3)
        
        # TODO should I split them?
        # self.visibility = keypoints[..., 2]
        self.keypoints = keypoints  # [..., :2]

        self.size = size
        self.mode = mode
        self.extra_fields = {} 
Example #15
Source File: 33_gru_raw.py    From deep-learning-note with MIT License
def get_params():
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    def _three():
        return (_one((num_inputs, num_hiddens)),
                _one((num_hiddens, num_hiddens)),
                torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))

    W_xz, W_hz, b_z = _three() # update gate parameters
    W_xr, W_hr, b_r = _three() # reset gate parameters
    W_xh, W_hh, b_h = _three() # candidate hidden state parameters

    # output layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
    return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]) 
Example #16
Source File: poolers.py    From Res2Net-maskrcnn with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #17
Source File: poolers.py    From Parsing-R-CNN with MIT License
def __init__(self, method, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        assert method in {'ROIPool', 'ROIAlign', 'ROIAlignV2'}, 'Unknown pooling method: {}'.format(method)

        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            if method == "ROIPool":
                poolers.append(
                    ROIPool(
                        output_size, spatial_scale=scale
                    )
                )
            elif method == "ROIAlign":
                poolers.append(
                    ROIAlign(
                        output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
                    )
                )
            elif method == "ROIAlignV2":
                poolers.append(
                    ROIAlign(
                        output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
                    )
                )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #18
Source File: loss.py    From Parsing-R-CNN with MIT License
def project_masks_on_boxes(segmentation_masks, proposals, resolution):
    """
    Given segmentation masks and the bounding boxes corresponding
    to the location of the masks in the image, this function
    crops and resizes the masks in the position defined by the
    boxes. This prepares the masks for them to be fed to the
    loss computation as the targets.

    Arguments:
        segmentation_masks: an instance of SegmentationMask
        proposals: an instance of BoxList
        resolution: the (height, width) of the output masks
    """
    masks = []
    h, w = resolution
    device = proposals.bbox.device
    proposals = proposals.convert("xyxy")
    assert segmentation_masks.size == proposals.size, "{}, {}".format(
        segmentation_masks, proposals
    )

    # FIXME: CPU computation bottleneck, this should be parallelized
    proposals = proposals.bbox.to(torch.device("cpu"))
    for segmentation_mask, proposal in zip(segmentation_masks, proposals):
        # crop the masks, resize them to the desired resolution and
        # then convert them to the tensor representation.
        cropped_mask = segmentation_mask.crop(proposal)
        scaled_mask = cropped_mask.resize((w, h))
        mask = scaled_mask.get_mask_tensor()
        masks.append(mask)

    if len(masks) == 0:
        return torch.empty(0, dtype=torch.float32, device=device)
    return torch.stack(masks, dim=0).to(device, dtype=torch.float32) 
Example #19
Source File: segmentation_mask.py    From Parsing-R-CNN with MIT License
def __init__(self, polygons, size):
        """
            Arguments:
                polygons: a list of lists of numbers.
                The first level refers to all the polygons that compose the
                object, and the second level to the polygon coordinates.
        """
        if isinstance(polygons, (list, tuple)):
            valid_polygons = []
            for p in polygons:
                p = torch.as_tensor(p, dtype=torch.float32)
                if len(p) >= 6:  # 3 * 2 coordinates
                    valid_polygons.append(p)
            polygons = valid_polygons

        elif isinstance(polygons, PolygonInstance):
            polygons = copy.copy(polygons.polygons)

        else:
            raise RuntimeError(
                "Type of argument `polygons` is not allowed: %s" % (type(polygons))
            )

        """ This crashes the training way too many times...
        for p in polygons:
            assert p[::2].min() >= 0
            assert p[::2].max() < size[0]
            assert p[1::2].min() >= 0
            assert p[1::2].max() < size[1]
        """

        self.polygons = polygons
        self.size = tuple(size) 
Example #20
Source File: networks.py    From connecting_the_dots with MIT License
def __init__(self, focal_length, baseline, im_height, im_width):
    super().__init__(focal_length, baseline)
    self.mod_name = 'PosToDepth'

    self.im_height = im_height
    self.im_width = im_width
    self.u_pos = torch.arange(im_width, dtype=torch.float32).view(1,1,1,-1) 
Example #21
Source File: segmentation_mask.py    From Res2Net-maskrcnn with MIT License
def __init__(self, polygons, size, mode):
        # assert isinstance(polygons, list), '{}'.format(polygons)
        if isinstance(polygons, list):
            polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
        elif isinstance(polygons, Polygons):
            polygons = polygons.polygons

        self.polygons = polygons
        self.size = size
        self.mode = mode 
Example #22
Source File: exp_synph.py    From connecting_the_dots with MIT License
def loss_forward(self, out, train):
    out, edge = out
    if not(isinstance(out, tuple) or isinstance(out, list)):
      out = [out]
    if not(isinstance(edge, tuple) or isinstance(edge, list)):
      edge = [edge]

    vals = []

    # apply photometric loss
    for s,l,o in zip(itertools.count(), self.losses, out):
      val, pattern_proj = l(o, self.data[f'im{s}'][:,0:1,...], self.data[f'std{s}'])
      if s == 0: 
        self.pattern_proj = pattern_proj.detach()
      vals.append(val)

    # apply disparity loss
    # use 1-edge as the ground-truth edge map, since the edge maps are inverted
    edge0 = 1-torch.sigmoid(edge[0])
    val = self.disparity_loss(out[0], edge0)
    if self.dp_weight>0:
      vals.append(val * self.dp_weight)

    # apply edge loss on a subset of training samples
    for s,e in zip(itertools.count(), edge):
      # inverted ground truth edge where 0 means edge
      grad = self.data[f'grad{s}']<0.2
      grad = grad.to(torch.float32)
      ids = self.data['id']
      mask = ids>self.train_edge
      if mask.sum()>0:
        val = self.edge_loss(e[mask], grad[mask])
      else:
        val = torch.zeros_like(vals[0]) 
      if s == 0:
        self.edge = e.detach()
        self.edge = torch.sigmoid(self.edge)
        self.edge_gt = grad.detach() 
      vals.append(val)

    return vals 
Example #23
Source File: concept_embedding_ls.py    From NSCL-PyTorch-Release with MIT License
def init_concepts(self, all_concepts, embeddings=None):
        assert self.nr_concepts == 0
        self.all_concepts.extend([str(x) for x in all_concepts])
        self.all_concepts.sort()
        self.concept_embeddings = nn.Embedding(len(self.all_concepts), self.concept_embedding_dim)
        if embeddings is not None:
            conc_embeddings = torch.tensor([embeddings[k] for k in self.all_concepts], dtype=torch.float32)
            self.concept_embeddings.weight.data.copy_(conc_embeddings) 
Example #24
Source File: concept_embedding_ls.py    From NSCL-PyTorch-Release with MIT License
def init_attributes(self, all_attributes, embeddings=None):
        assert self.nr_attributes == 0
        self.all_attributes.extend([str(x) for x in all_attributes])
        self.all_attributes.sort()
        if self.nr_attributes == 0:
            return

        self.attribute_embeddings = nn.Embedding(len(self.all_attributes), self.attribute_embedding_dim)
        if embeddings is not None:
            attr_embeddings = torch.tensor([embeddings[k] for k in self.all_attributes], dtype=torch.float32)
            self.attribute_embeddings.weight.data.copy_(attr_embeddings) 
Example #25
Source File: losses.py    From NSCL-PyTorch-Release with MIT License
def forward(self, input, labels):
        if type(labels) in (tuple, list):
            labels = torch.tensor(labels, dtype=torch.int64, device=input.device)

        assert input.dim() == 1
        if not self.one_hot:
            with torch.no_grad():
                mask = torch.zeros_like(input)
                if labels.size(0) > 0:
                    ones = torch.ones_like(labels, dtype=torch.float32)
                    mask.scatter_(0, labels, ones)
            labels = mask

        return self.bce(input, labels).sum(dim=-1).mean() 
Example #26
Source File: test_rpn_heads.py    From Res2Net-maskrcnn with MIT License
def test_build_rpn_heads(self):
        ''' Make sure rpn heads run '''

        self.assertGreater(len(registry.RPN_HEADS), 0)

        in_channels = 64
        num_anchors = 10

        for name, builder in registry.RPN_HEADS.items():
            print('Testing {}...'.format(name))
            if name in RPN_CFGS:
                cfg = load_config(RPN_CFGS[name])
            else:
                # Use default config if config file is not specified
                cfg = copy.deepcopy(g_cfg)

            rpn = builder(cfg, in_channels, num_anchors)

            N, C_in, H, W = 2, in_channels, 24, 32
            input = torch.rand([N, C_in, H, W], dtype=torch.float32)
            LAYERS = 3
            out = rpn([input] * LAYERS)
            self.assertEqual(len(out), 2)
            logits, bbox_reg = out
            for idx in range(LAYERS):
                self.assertEqual(
                    logits[idx].shape,
                    torch.Size([
                        input.shape[0], num_anchors,
                        input.shape[2], input.shape[3],
                    ])
                )
                self.assertEqual(
                    bbox_reg[idx].shape,
                    torch.Size([
                        logits[idx].shape[0], num_anchors * 4,
                        logits[idx].shape[2], logits[idx].shape[3],
                    ]),
                ) 
Example #27
Source File: test_feature_extractors.py    From Res2Net-maskrcnn with MIT License
def _test_feature_extractors(
    self, extractors, overwrite_cfgs, overwrite_in_channels
):
    ''' Make sure roi box feature extractors run '''

    self.assertGreater(len(extractors), 0)

    in_channels_default = 64

    for name, builder in extractors.items():
        print('Testing {}...'.format(name))
        if name in overwrite_cfgs:
            cfg = load_config(overwrite_cfgs[name])
        else:
            # Use default config if config file is not specified
            cfg = copy.deepcopy(g_cfg)

        in_channels = overwrite_in_channels.get(
            name, in_channels_default)

        fe = builder(cfg, in_channels)
        self.assertIsNotNone(
            getattr(fe, 'out_channels', None),
            'Need to provide out_channels for feature extractor {}'.format(name)
        )

        N, C_in, H, W = 2, in_channels, 24, 32
        input = torch.rand([N, C_in, H, W], dtype=torch.float32)
        bboxes = [[1, 1, 10, 10], [5, 5, 8, 8], [2, 2, 3, 4]]
        img_size = [384, 512]
        box_list = BoxList(bboxes, img_size, "xyxy")
        out = fe([input], [box_list] * N)
        self.assertEqual(
            out.shape[:2],
            torch.Size([N * len(bboxes), fe.out_channels])
        ) 
Example #28
Source File: test_backbones.py    From Res2Net-maskrcnn with MIT License
def test_build_backbones(self):
        ''' Make sure backbones run '''

        self.assertGreater(len(registry.BACKBONES), 0)

        for name, backbone_builder in registry.BACKBONES.items():
            print('Testing {}...'.format(name))
            if name in BACKBONE_CFGS:
                cfg = load_config(BACKBONE_CFGS[name])
            else:
                # Use default config if config file is not specified
                cfg = copy.deepcopy(g_cfg)
            backbone = backbone_builder(cfg)

            # make sure the backbone has `out_channels`
            self.assertIsNotNone(
                getattr(backbone, 'out_channels', None),
                'Need to provide out_channels for backbone {}'.format(name)
            )

            N, C_in, H, W = 2, 3, 224, 256
            input = torch.rand([N, C_in, H, W], dtype=torch.float32)
            out = backbone(input)
            for cur_out in out:
                self.assertEqual(
                    cur_out.shape[:2],
                    torch.Size([N, backbone.out_channels])
                ) 
Example #29
Source File: test_predictors.py    From Res2Net-maskrcnn with MIT License
def _test_predictors(
    self, predictors, overwrite_cfgs, overwrite_in_channels,
    hwsize,
):
    ''' Make sure predictors run '''

    self.assertGreater(len(predictors), 0)

    in_channels_default = 64

    for name, builder in predictors.items():
        print('Testing {}...'.format(name))
        if name in overwrite_cfgs:
            cfg = load_config(overwrite_cfgs[name])
        else:
            # Use default config if config file is not specified
            cfg = copy.deepcopy(g_cfg)

        in_channels = overwrite_in_channels.get(
            name, in_channels_default)

        fe = builder(cfg, in_channels)

        N, C_in, H, W = 2, in_channels, hwsize, hwsize
        input = torch.rand([N, C_in, H, W], dtype=torch.float32)
        out = fe(input)
        yield input, out, cfg 
Example #30
Source File: test_fbnet.py    From Res2Net-maskrcnn with MIT License
def _test_primitive(self, device, op_name, op_func, N, C_in, C_out, expand, stride):
    op = op_func(C_in, C_out, expand, stride).to(device)
    input = torch.rand([N, C_in, 7, 7], dtype=torch.float32).to(device)
    output = op(input)
    self.assertEqual(
        output.shape[:2], torch.Size([N, C_out]),
        'Primitive {} failed for shape {}.'.format(op_name, input.shape)
    )