Python scipy.ndimage.zoom() Examples

The following are 30 code examples of scipy.ndimage.zoom(), collected from open-source projects; the source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the scipy.ndimage module.
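For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call: zoom scales each axis by a single factor or by per-axis factors, interpolating with a spline of the requested order.

import numpy as np
from scipy.ndimage import zoom

a = np.arange(16, dtype=np.float64).reshape(4, 4)
b = zoom(a, 2.0, order=1)         # every axis scaled by 2 -> shape (8, 8)
c = zoom(a, (2.0, 0.5), order=0)  # per-axis factors -> shape (8, 2)
print(b.shape, c.shape)           # (8, 8) (8, 2)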
Example #1
Source File: shape_ops.py    From deep_pipe with MIT License
def proportional_zoom_to_shape(x: np.ndarray, shape: AxesLike, axes: AxesLike = None,
                               padding_values: Union[AxesParams, Callable] = 0, order: int = 1) -> np.ndarray:
    """
    Proportionally rescale ``x`` to fit ``shape`` along ``axes`` then pad it to that shape.

    Parameters
    ----------
    x
    shape
        final shape.
    axes
        axes along which ``x`` will be padded. If None - the last ``len(shape)`` axes are used.
    padding_values
        values to pad with.
    order
        order of interpolation.
    """
    axes = expand_axes(axes, shape)
    scale_factor = (np.array(shape, 'float64') / extract(x.shape, axes)).min()
    return pad_to_shape(zoom(x, scale_factor, axes, order), shape, axes, padding_values) 
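A hypothetical usage sketch (the import path and the deep_pipe helpers expand_axes, extract, and pad_to_shape are assumptions): proportionally rescale a 30x40 image and pad it into a 64x64 canvas.

import numpy as np
# from dpipe.im.shape_ops import proportional_zoom_to_shape  # assumed import path

img = np.random.rand(30, 40)
out = proportional_zoom_to_shape(img, shape=(64, 64), padding_values=0)
assert out.shape == (64, 64)  # scaled by min(64/30, 64/40), then padded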
Example #2
Source File: models.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        smap = self.parent_model.log_density(stimulus)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Example #3
Source File: make_imagenet_64_c.py    From robustness with Apache License 2.0
def clipped_zoom(img, zoom_factor):
    h = img.shape[0]
    # ceil crop height (= crop width)
    ch = int(np.ceil(h / zoom_factor))

    top = (h - ch) // 2
    img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
    # trim off any extra pixels
    trim_top = (img.shape[0] - h) // 2

    return img[trim_top:trim_top + h, trim_top:trim_top + h]


# /////////////// End Distortion Helpers ///////////////


# /////////////// Distortions /////////////// 
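Because the center crop is zoomed back up and then trimmed, the output of clipped_zoom matches the input shape; a quick sanity check (a sketch, assuming a square HxWxC image and zoom_factor >= 1):

import numpy as np
from scipy.ndimage import zoom as scizoom

img = np.random.rand(64, 64, 3).astype(np.float32)
out = clipped_zoom(img, zoom_factor=1.3)
assert out.shape == img.shape  # still (64, 64, 3)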
Example #4
Source File: make_tinyimagenet_c.py    From robustness with Apache License 2.0

This file defines the same clipped_zoom helper as Example #3.
Example #5
Source File: make_cifar_c.py    From robustness with Apache License 2.0

This file defines the same clipped_zoom helper as Example #3.
Example #6
Source File: make_imagenet_c.py    From robustness with Apache License 2.0

This file defines the same clipped_zoom helper as Example #3.
Example #7
Source File: models.py    From pysaliency with MIT License
def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        smap = self.parent_model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes=attributes, out=out)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Example #8
Source File: evaluate.py    From pytorch-segmentation-toolbox with MIT License
def predict_multiscale(net, image, tile_size, scales, classes, flip_evaluation, recurrence):
    """
    Predict an image by looking at it with different scales.
        We choose the "predict_whole_img" for the image with less than the original input size,
        for the input of larger size, we would choose the cropping method to ensure that GPU memory is enough.
    """
    image = image.data
    N_, C_, H_, W_ = image.shape
    full_probs = np.zeros((H_, W_, classes))  
    for scale in scales:
        scale = float(scale)
        print("Predicting image scaled by %f" % scale)
        scale_image = ndimage.zoom(image, (1.0, 1.0, scale, scale), order=1, prefilter=False)
        scaled_probs = predict_whole(net, scale_image, tile_size, recurrence)
        if flip_evaluation:
            flip_scaled_probs = predict_whole(net, scale_image[:,:,:,::-1].copy(), tile_size, recurrence)
            scaled_probs = 0.5 * (scaled_probs + flip_scaled_probs[:,::-1,:])
        full_probs += scaled_probs
    full_probs /= len(scales)
    return full_probs 
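The zoom tuple (1.0, 1.0, scale, scale) leaves the batch and channel axes of the NCHW array untouched and rescales only the spatial axes; a standalone sketch:

import numpy as np
from scipy import ndimage

batch = np.random.rand(1, 3, 128, 128).astype(np.float32)  # NCHW layout
half = ndimage.zoom(batch, (1.0, 1.0, 0.5, 0.5), order=1, prefilter=False)
print(half.shape)  # (1, 3, 64, 64)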
Example #9
Source File: inference.py    From pytorch_segmentation with MIT License
def multi_scale_predict(model, image, scales, num_classes, device, flip=False):
    input_size = (image.size(2), image.size(3))
    upsample = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
    total_predictions = np.zeros((num_classes, image.size(2), image.size(3)))

    image = image.data.cpu().numpy()
    for scale in scales:
        scaled_img = ndimage.zoom(image, (1.0, 1.0, float(scale), float(scale)), order=1, prefilter=False)
        scaled_img = torch.from_numpy(scaled_img).to(device)
        scaled_prediction = upsample(model(scaled_img).cpu())

        if flip:
            flipped_img = scaled_img.flip(-1).to(device)
            flipped_predictions = upsample(model(flipped_img).cpu())
            scaled_prediction = 0.5 * (flipped_predictions.flip(-1) + scaled_prediction)
        total_predictions += scaled_prediction.data.cpu().numpy().squeeze(0)

    total_predictions /= len(scales)
    return total_predictions 
Example #10
Source File: val.py    From Fast_Seg with Apache License 2.0
def predict_multiscale(net, image, tile_size, scales, classes, flip_evaluation):
    """
    Predict an image by looking at it with different scales.
        We choose the "predict_whole_img" for the image with less than the original input size,
        for the input of larger size, we would choose the cropping method to ensure that GPU memory is enough.
    """
    image = image.data
    N_, C_, H_, W_ = image.shape
    full_probs = np.zeros((H_, W_, classes))
    for scale in scales:
        scale = float(scale)
        print("Predicting image scaled by %f" % scale)
        scale_image = ndimage.zoom(image, (1.0, 1.0, scale, scale), order=1, prefilter=False)
        scaled_probs = predict_whole(net, scale_image, tile_size)
        if flip_evaluation:
            flip_scaled_probs = predict_whole(net, scale_image[:, :, :, ::-1].copy(), tile_size)
            scaled_probs = 0.5 * (scaled_probs + flip_scaled_probs[:, ::-1, :])
        full_probs += scaled_probs
    full_probs /= len(scales)
    return full_probs 
Example #11
Source File: saliency_map_models.py    From pysaliency with MIT License
def _saliency_map(self, stimulus):
        smap = self.parent_model.saliency_map(stimulus)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            assert smap.shape == target_shape

        return smap 
Example #12
Source File: perturbation.py    From alibi-detect with Apache License 2.0
def clipped_zoom(x: np.ndarray, zoom_factor: float) -> np.ndarray:
    """
    Helper function for zoom blur.

    Parameters
    ----------
    x
        Instance to be perturbed.
    zoom_factor
        Zoom strength.

    Returns
    -------
    Cropped and zoomed instance.
    """
    h = x.shape[0]
    ch = int(np.ceil(h / float(zoom_factor)))  # ceil crop height (= crop width)
    top = (h - ch) // 2
    x = zoom(x[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
    trim_top = (x.shape[0] - h) // 2  # trim off any extra pixels
    return x[trim_top:trim_top + h, trim_top:trim_top + h] 
Example #13
Source File: make_imagenet_c_inception.py    From robustness with Apache License 2.0

This file defines the same clipped_zoom helper as Example #3.
Example #14
Source File: __init__.py    From grib-doctor with MIT License
def handleBands(data, snapshape):
    import numpy as np
    from scipy.ndimage import zoom
    try:
        data[np.where(data.mask == True)] = data.min()
    except AttributeError:
        pass
    if data.shape != snapshape:
        data = handleArrays(data)
        data = zoom(data, 2 * snapshape[1] / data.shape[1], order=1)
        data = ((np.roll(data, 1, axis=0) + data) / 2)[1:]

    else:
        data = handleArrays(data)

    return data 
Example #15
Source File: evaluate.py    From CCNet with MIT License

This file defines the same predict_multiscale function as Example #8.
Example #16
Source File: gridworld.py    From qmap with MIT License
def render(self, mode='human', close=False):
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
        img = self.get_obs()[0]
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            from gym.envs.classic_control import rendering
            from scipy.ndimage import zoom
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            img = zoom(img, [5, 5, 1], order=0)
            self.viewer.imshow(img)
        else:
            raise NotImplementedError

    # Generate ground truth Q-frames by finding the smallest number of steps towards all coordinates given a window position. 
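order=0 selects nearest-neighbor interpolation, which preserves the blocky pixel look when enlarging a low-resolution frame; a minimal sketch of that upscaling step:

import numpy as np
from scipy.ndimage import zoom

frame = np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8)
big = zoom(frame, [5, 5, 1], order=0)  # 5x per spatial axis, channels untouched
print(big.shape)  # (80, 80, 3)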
Example #17
Source File: load_data.py    From kaggle-galaxies with BSD 3-Clause "New" or "Revised" License
def im_rescale_old(img, scale_factor):
    zoomed_img = np.zeros_like(img, dtype=img.dtype)

    if img.ndim == 2:
        z = (scale_factor, scale_factor)
    elif img.ndim == 3:
        z = (scale_factor, scale_factor, 1)
    else:
        raise ValueError("expected a 2D or 3D image, got ndim=%d" % img.ndim)
    zoomed = ndimage.zoom(img, z)

    if scale_factor >= 1.0:
        shift_x = (zoomed.shape[0] - img.shape[0]) // 2
        shift_y = (zoomed.shape[1] - img.shape[1]) // 2
        zoomed_img[:,:] = zoomed[shift_x:shift_x+img.shape[0], shift_y:shift_y+img.shape[1]]
    else:
        shift_x = (img.shape[0] - zoomed.shape[0]) // 2
        shift_y = (img.shape[1] - zoomed.shape[1]) // 2
        zoomed_img[shift_x:shift_x+zoomed.shape[0], shift_y:shift_y+zoomed.shape[1]] = zoomed

    return zoomed_img 
Example #18
Source File: load_data.py    From kaggle-galaxies with BSD 3-Clause "New" or "Revised" License
def im_rescale(img, scale_factor):
    zoomed_img = np.zeros_like(img, dtype=img.dtype)
    zoomed = skimage.transform.rescale(img, scale_factor)

    if scale_factor >= 1.0:
        shift_x = (zoomed.shape[0] - img.shape[0]) // 2
        shift_y = (zoomed.shape[1] - img.shape[1]) // 2
        zoomed_img[:,:] = zoomed[shift_x:shift_x+img.shape[0], shift_y:shift_y+img.shape[1]]
    else:
        shift_x = (img.shape[0] - zoomed.shape[0]) // 2
        shift_y = (img.shape[1] - zoomed.shape[1]) // 2
        zoomed_img[shift_x:shift_x+zoomed.shape[0], shift_y:shift_y+zoomed.shape[1]] = zoomed

    return zoomed_img


# this old version uses ndimage zoom which is unreliable 
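The unreliability referred to here is that ndimage.zoom derives the output shape by rounding factor * input_shape, so the result can be a pixel off from what the caller expects (and the exact rounding has varied across SciPy versions); a quick illustration:

import numpy as np
from scipy import ndimage

img = np.ones((15, 15))
zoomed = ndimage.zoom(img, 1.1)  # 15 * 1.1 = 16.5, so the output shape is rounded
print(zoomed.shape)              # (16, 16) or (17, 17) depending on the SciPy version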
Example #19
Source File: imgcorruptlike.py    From imgaug with MIT License
def _clipped_zoom_no_scipy_warning(img, zoom_factor):
    from scipy.ndimage import zoom as scizoom

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", ".*output shape of zoom.*")

        # clipping along the height dimension (axis 0):
        ch0 = int(np.ceil(img.shape[0] / float(zoom_factor)))
        top0 = (img.shape[0] - ch0) // 2

        # clipping along the width dimension (axis 1):
        ch1 = int(np.ceil(img.shape[1] / float(zoom_factor)))
        top1 = (img.shape[1] - ch1) // 2

        img = scizoom(img[top0:top0 + ch0, top1:top1 + ch1],
                      (zoom_factor, zoom_factor, 1), order=1)

        return img 
Example #20
Source File: deep_dream.py    From PyTorch-Deep-Dream with MIT License
def deep_dream(image, model, iterations, lr, octave_scale, num_octaves):
    """ Main deep dream method """
    image = preprocess(image).unsqueeze(0).cpu().data.numpy()

    # Extract image representations for each octave
    octaves = [image]
    for _ in range(num_octaves - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1, 1 / octave_scale, 1 / octave_scale), order=1))

    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(tqdm.tqdm(octaves[::-1], desc="Dreaming")):
        if octave > 0:
            # Upsample detail to new octave dimension
            detail = nd.zoom(detail, np.array(octave_base.shape) / np.array(detail.shape), order=1)
        # Add deep dream detail from previous octave to new base
        input_image = octave_base + detail
        # Get new deep dream image
        dreamed_image = dream(input_image, model, iterations, lr)
        # Extract deep dream details
        detail = dreamed_image - octave_base

    return deprocess(dreamed_image) 
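Building the octave pyramid is just a chain of fractional zooms of the NCHW array; a standalone sketch of that step (octave_scale and num_octaves are illustrative values):

import numpy as np
import scipy.ndimage as nd

image = np.random.rand(1, 3, 256, 256).astype(np.float32)
octave_scale, num_octaves = 1.4, 4
octaves = [image]
for _ in range(num_octaves - 1):
    octaves.append(nd.zoom(octaves[-1], (1, 1, 1 / octave_scale, 1 / octave_scale), order=1))
print([o.shape for o in octaves])  # spatial dims shrink by ~1.4x per octave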
Example #21
Source File: preprocessing.py    From PSPNet-Keras-tensorflow with MIT License
def generate(values, nb_classes, batch_size, input_size, image_dir, anno_dir):
  while True:
    random.shuffle(values)
    images, labels = update_inputs(batch_size=batch_size,
       input_size=input_size, num_classes=nb_classes)
    for i, d in enumerate(values):
      img = imresize(imread(os.path.join(image_dir, d['image']), mode='RGB'), input_size)
      y = imread(os.path.join(anno_dir, d['anno']), mode='L')
      h, w = input_size
      y = zoom(y, (1.*h/y.shape[0], 1.*w/y.shape[1]), order=1, prefilter=False)
      y = (np.arange(nb_classes) == y[:,:,None]).astype('float32')
      assert y.shape[2] == nb_classes
      images[i % batch_size] = img
      labels[i % batch_size] = y
      if (i + 1) % batch_size == 0:
        yield images, labels
        images, labels = update_inputs(batch_size=batch_size,
          input_size=input_size, num_classes=nb_classes) 
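The broadcast comparison np.arange(nb_classes) == y[:, :, None] is a compact one-hot encoding of the label map; a standalone sketch:

import numpy as np

nb_classes = 3
y = np.array([[0, 2], [1, 1]])
onehot = (np.arange(nb_classes) == y[:, :, None]).astype('float32')
print(onehot.shape)  # (2, 2, 3)
print(onehot[0, 1])  # [0. 0. 1.] -> pixel (0, 1) is class 2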
Example #22
Source File: pspnet-video.py    From PSPNet-Keras-tensorflow with MIT License
def predict_multi_scale(full_image, net, scales, sliding_evaluation, flip_evaluation):
    """Predict an image by looking at it with different scales."""
    classes = net.model.outputs[0].shape[3]
    full_probs = np.zeros((full_image.shape[0], full_image.shape[1], classes))
    h_ori, w_ori = full_image.shape[:2]
    for scale in scales:
        print("Predicting image scaled by %f" % scale)
        scaled_img = misc.imresize(full_image, size=scale, interp="bilinear")
        if sliding_evaluation:
            scaled_probs = predict_sliding(scaled_img, net, flip_evaluation)
        else:
            scaled_probs = net.predict(scaled_img, flip_evaluation)
        # scale probs up to full size
        h, w = scaled_probs.shape[:2]
        probs = ndimage.zoom(scaled_probs, (1.*h_ori/h, 1.*w_ori/w, 1.), order=1, prefilter=False)
        # visualize_prediction(probs)
        # integrate probs over all scales
        full_probs += probs
    full_probs /= len(scales)
    return full_probs 
Example #23
Source File: test_rand_zoomd.py    From MONAI with Apache License 2.0
def test_correct_results(self, min_zoom, max_zoom, mode, align_corners, keep_size):
        key = "img"
        random_zoom = RandZoomd(
            key,
            prob=1.0,
            min_zoom=min_zoom,
            max_zoom=max_zoom,
            mode=mode,
            align_corners=align_corners,
            keep_size=keep_size,
        )
        random_zoom.set_random_state(1234)

        zoomed = random_zoom({key: self.imt[0]})
        expected = list()
        for channel in self.imt[0]:
            expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode="nearest", order=0, prefilter=False))
        expected = np.stack(expected).astype(np.float32)
        np.testing.assert_allclose(expected, zoomed[key], atol=1.0) 
Example #24
Source File: test_datatypes.py    From GraphicDesignPatternByPython with MIT License
def test_uint64_max():
    # Test interpolation respects uint64 max.  Reported to fail at least on
    # win32 (due to the 32 bit visual C compiler using signed int64 when
    # converting between uint64 to double) and Debian on s390x.
    # Interpolation is always done in double precision floating point, so we
    # use the largest uint64 value for which int(float(big)) still fits in
    # a uint64.
    big = 2**64-1025
    arr = np.array([big, big, big], dtype=np.uint64)
    # Tests geometric transform (map_coordinates, affine_transform)
    inds = np.indices(arr.shape) - 0.1
    x = ndimage.map_coordinates(arr, inds)
    assert_equal(x[1], int(float(big)))
    assert_equal(x[2], int(float(big)))
    # Tests zoom / shift
    x = ndimage.shift(arr, 0.1)
    assert_equal(x[1], int(float(big)))
    assert_equal(x[2], int(float(big))) 
Example #25
Source File: landmark_dmbl_predict.py    From Cytomine-python-datamining with Apache License 2.0
def agregation_phase_2(repository, image_number, ip, probability_maps, reg, delta, feature_offsets, filter_size, beta, n_iterations):
	img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
	(h, w, nldms) = probability_maps.shape
	nldms -= 1
	mh = h - 1
	mw = w - 1
	for iteration in range(n_iterations):
		y, x = np.where(probability_maps[:, :, ip] >= beta * np.max(probability_maps[:, :, ip]))
		dataset = dataset_from_coordinates(img, x + 1, y + 1, feature_offsets)
		offsets = reg.predict(dataset)
		n_x = (x - offsets[:, 0]).clip(min=0, max=mw).astype(int)  # integer indices for the vote map
		n_y = (y - offsets[:, 1]).clip(min=0, max=mh).astype(int)
		new_pmap = np.zeros((h, w))
		for i in range(n_x.size):
			new_pmap[n_y[i], n_x[i]] += probability_maps[y[i], x[i], ip]
		probability_maps[:, :, ip] = new_pmap
		probability_maps[0, :, ip] = 0
		probability_maps[:, 0, ip] = 0
		probability_maps[mh, :, ip] = 0
		probability_maps[:, mw, ip] = 0

	return filter_perso(probability_maps[:, :, ip], filter_size) 
Example #26
Source File: shape_ops.py    From deep_pipe with MIT License
def zoom(x: np.ndarray, scale_factor: AxesParams, axes: AxesLike = None, order: int = 1,
         fill_value: Union[float, Callable] = 0) -> np.ndarray:
    """
    Rescale ``x`` according to ``scale_factor`` along the ``axes``.

    Parameters
    ----------
    x
    scale_factor
    axes
        axes along which the tensor will be scaled. If None - the last ``len(scale_factor)`` axes are used.
    order
        order of interpolation.
    fill_value
        value to fill past edges. If Callable (e.g. `numpy.min`) - ``fill_value(x)`` will be used.
    """
    scale_factor = fill_by_indices(np.ones(x.ndim, 'float64'), scale_factor, axes)
    if callable(fill_value):
        fill_value = fill_value(x)

    # remove an annoying warning
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        return ndimage.zoom(x, scale_factor, order=order, cval=fill_value) 
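A hypothetical call (assuming the deep_pipe helper fill_by_indices is available): scale only the last two axes of a volume and fill past-edge values with the array minimum.

import numpy as np

volume = np.random.rand(8, 64, 64)
out = zoom(volume, scale_factor=(0.5, 0.5), axes=(1, 2), fill_value=np.min)
print(out.shape)  # (8, 32, 32)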
Example #27
Source File: shape_ops.py    From deep_pipe with MIT License
def zoom_to_shape(x: np.ndarray, shape: AxesLike, axes: AxesLike = None, order: int = 1,
                  fill_value: Union[float, Callable] = 0) -> np.ndarray:
    """
    Rescale ``x`` to match ``shape`` along the ``axes``.

    Parameters
    ----------
    x
    shape
        final shape.
    axes
        axes along which the tensor will be scaled. If None - the last ``len(shape)`` axes are used.
    order
        order of interpolation.
    fill_value
        value to fill past edges. If Callable (e.g. `numpy.min`) - ``fill_value(x)`` will be used.
    """
    old_shape = np.array(x.shape, 'float64')
    new_shape = np.array(fill_by_indices(x.shape, shape, axes), 'float64')
    return zoom(x, new_shape / old_shape, order=order, fill_value=fill_value) 
Example #28
Source File: actviz.py    From gandissect with MIT License
def zoom_image(img, source_rect, target_shape=None):
    """Zooms pixels from the source_rect of img to target_shape."""
    import warnings
    from scipy.ndimage import zoom
    if target_shape is None:
        target_shape = img.shape
    st, sb, sl, sr = source_rect
    source = img[st:sb, sl:sr]
    if source.shape == target_shape:
        return source
    zoom_tuple = tuple(float(t) / s
            for t, s in zip(target_shape, source.shape[:2])
            ) + (1,) * (img.ndim - 2)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning) # "output shape of zoom"
        target = zoom(source, zoom_tuple)
    assert target.shape[:2] == target_shape, (target.shape, target_shape)
    return target 
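A quick usage sketch, assuming an HxWxC image and a (top, bottom, left, right) rectangle: the pixels inside the rectangle are enlarged to fill target_shape.

import numpy as np

img = np.random.rand(32, 32, 3)
patch = zoom_image(img, source_rect=(8, 24, 8, 24), target_shape=(32, 32))
print(patch.shape)  # (32, 32, 3): the 16x16 crop zoomed up by 2x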
Example #29
Source File: build_dmbl_model.py    From Cytomine-python-datamining with Apache License 2.0
def image_dataset_phase_2(repository, image_number, x, y, feature_offsets, R_offsets, delta):
	img = makesize(snd.zoom(readimage(repository, image_number), delta), 1)
	(h, w) = img.shape
	mask = np.ones((h, w), 'bool')
	mask[:, 0] = 0
	mask[0, :] = 0
	mask[h - 1, :] = 0
	mask[:, w - 1] = 0
	(nroff, blc) = R_offsets.shape
	h -= 2
	w -= 2
	x += 1
	y += 1
	rep = np.zeros((nroff, 2))
	number = image_number
	xs = (x + R_offsets[:, 0]).astype('int')
	ys = (y + R_offsets[:, 1]).astype('int')
	rep[:, 0] = R_offsets[:, 0]
	rep[:, 1] = R_offsets[:, 1]
	dataset = dataset_from_coordinates(img, xs, ys, feature_offsets)
	return dataset, rep, number 
Example #30
Source File: perturbation.py    From alibi-detect with Apache License 2.0
def zoom_blur(x: np.ndarray, max_zoom: float, step_zoom: float, xrange: tuple = None) -> np.ndarray:
    """
    Apply zoom blur.

    Parameters
    ----------
    x
        Instance to be perturbed.
    max_zoom
        Max zoom strength.
    step_zoom
        Step size to go from 1 to `max_zoom` strength.
    xrange
        Tuple with min and max data range.

    Returns
    -------
    Perturbed instance.
    """
    x, scale_back = scale_minmax(x, xrange)
    zoom_factors = np.arange(1, max_zoom, step_zoom)
    out = np.zeros_like(x)
    for zoom_factor in zoom_factors:
        out += clipped_zoom(x, zoom_factor)
    x_z = (x + out) / (len(zoom_factors) + 1)
    if scale_back:
        x_z = x_z * (xrange[1] - xrange[0]) + xrange[0]
    if isinstance(xrange, tuple):
        return np.clip(x_z, xrange[0], xrange[1])
    else:
        return x_z
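A hypothetical end-to-end call (assuming alibi_detect's scale_minmax and the clipped_zoom helper from Example #12 are in scope): averaging progressively zoomed copies of the instance produces the blur.

import numpy as np

x = np.random.rand(32, 32, 3).astype(np.float32)
blurred = zoom_blur(x, max_zoom=1.3, step_zoom=0.03, xrange=(0, 1))
print(blurred.shape)  # (32, 32, 3), values clipped to [0, 1]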