Python random.uniform() Examples

The following are 30 code examples of random.uniform(), drawn from open-source projects. You can go to the original project or source file by following the reference above each example. You may also want to check out the other available functions and classes of the random module.
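Before the project examples, here is a minimal, self-contained sketch of the call itself: random.uniform(a, b) returns a float N with a <= N <= b (the upper bound may or may not be produced because of floating-point rounding), and the two bounds may be passed in either order.

import random

random.seed(0)                  # optional, only to make the draws repeatable
x = random.uniform(10, 20)      # float in [10, 20]
y = random.uniform(20, 10)      # bounds may be given in either order
print(x, y)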
Example #1
Source File: datasets.py    From pruning_yolov3 with GNU General Public License v3.0 7 votes
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32)  # random gains
    img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed


# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):  # original version
#     # SV augmentation by 50%
#     img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # hue, sat, val
#
#     S = img_hsv[:, :, 1].astype(np.float32)  # saturation
#     V = img_hsv[:, :, 2].astype(np.float32)  # value
#
#     a = random.uniform(-1, 1) * sgain + 1
#     b = random.uniform(-1, 1) * vgain + 1
#     S *= a
#     V *= b
#
#     img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
#     img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
#     cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed 
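A minimal usage sketch for augment_hsv() above (not from the original repository), assuming numpy and cv2 are imported as in the source file; the random uint8 array merely stands in for a loaded BGR frame:

img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # placeholder BGR image
augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5)                # modifies img in place; no return value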
Example #2
Source File: demo_sampler_wrapper.py    From robosuite with MIT License 6 votes
def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method() 
Example #3
Source File: data_augmentation.py    From Yolo-v2-pytorch with MIT License 6 votes
def __call__(self, data):
        image, label = data
        height, width = image.shape[:2]
        xmin = width
        ymin = height
        xmax = 0
        ymax = 0
        for lb in label:
            xmin = min(xmin, lb[0])
            ymin = min(ymin, lb[1])
            xmax = max(xmax, lb[2])
            ymax = max(ymax, lb[3])  # lb = [xmin, ymin, xmax, ymax, class_id]
        cropped_left = uniform(0, self.max_crop)
        cropped_right = uniform(0, self.max_crop)
        cropped_top = uniform(0, self.max_crop)
        cropped_bottom = uniform(0, self.max_crop)
        new_xmin = int(min(cropped_left * width, xmin))
        new_ymin = int(min(cropped_top * height, ymin))
        new_xmax = int(max(width - 1 - cropped_right * width, xmax))
        new_ymax = int(max(height - 1 - cropped_bottom * height, ymax))

        image = image[new_ymin:new_ymax, new_xmin:new_xmax, :]
        label = [[lb[0] - new_xmin, lb[1] - new_ymin, lb[2] - new_xmin, lb[3] - new_ymin, lb[4]] for lb in label]

        return image, label 
Example #4
Source File: parameters.py    From tensortrade with Apache License 2.0 6 votes
def random(base_price, t_gen, delta):
    return ModelParameters(
        all_s0=base_price,
        all_r0=0.5,
        all_time=t_gen,
        all_delta=delta,
        all_sigma=uniform(0.1, 0.8),
        gbm_mu=uniform(-0.3, 0.6),
        jumps_lamda=uniform(0.0071, 0.6),
        jumps_sigma=uniform(-0.03, 0.04),
        jumps_mu=uniform(-0.2, 0.2),
        cir_a=3.0,
        cir_mu=0.5,
        cir_rho=0.5,
        ou_a=3.0,
        ou_mu=0.5,
        heston_a=uniform(1, 5),
        heston_mu=uniform(0.156, 0.693),
        heston_vol0=0.06125
    ) 
Example #5
Source File: video_transforms.py    From DDPAE-video-prediction with MIT License 6 votes
def __call__(self, video):
    for attempt in range(10):
      area = video.shape[-3]*video.shape[-2]
      target_area = random.uniform(0.08, 1.0)*area
      aspect_ratio = random.uniform(3./4, 4./3)

      w = int(round(math.sqrt(target_area*aspect_ratio)))
      h = int(round(math.sqrt(target_area/aspect_ratio)))

      if random.random() < 0.5:
        w, h = h, w

      if w <= video.shape[-2] and h <= video.shape[-3]:
        x1 = random.randint(0, video.shape[-2]-w)
        y1 = random.randint(0, video.shape[-3]-h)

        video = video[..., y1:y1+h, x1:x1+w, :]

        return resize(video, (self.size, self.size), self.interpolation)

    # Fallback
    scale = Scale(self.size, interpolation=self.interpolation)
    crop = CenterCrop(self.size)
    return crop(scale(video)) 
Example #6
Source File: __init__.py    From controller with MIT License 6 votes
def fake_responses(request, context):
    responses = [
        # increasing the chance of 404
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'OK', 'status_code': 200},
        {'text': 'Gateway timeout', 'status_code': 504},
        {'text': 'Bad gateway', 'status_code': 502},
    ]
    random.shuffle(responses)
    response = responses.pop()

    context.status_code = response['status_code']
    context.reason = response['text']
    # Random float x, 1.0 <= x <= 4.0, for some sleep jitter
    time.sleep(random.uniform(1, 4))
    return response['text'] 
Example #7
Source File: mock.py    From controller with MIT License 6 votes
def add_cleanup_pod(url):
    """populate the cleanup pod list"""
    # variance allows a pod to stay alive past grace period
    variance = random.uniform(0.1, 1.5)
    grace = round(settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS * variance)

    # save
    pods = cache.get('cleanup_pods', {})
    pods[url] = (datetime.utcnow() + timedelta(seconds=grace))
    cache.set('cleanup_pods', pods)

    # add grace period timestamp
    pod = cache.get(url)
    grace = settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS
    pd = datetime.utcnow() + timedelta(seconds=grace)
    timestamp = str(pd.strftime(MockSchedulerClient.DATETIME_FORMAT))
    pod['metadata']['deletionTimestamp'] = timestamp
    cache.set(url, pod) 
Example #8
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 6 votes
def get_params(img, scale, ratio):

        if type(img) == np.ndarray:
            img_h, img_w, img_c = img.shape
        else: 
            img_h, img_w = img.size
            img_c = len(img.getbands())

        s = random.uniform(*scale)
        # if your img_h != img_w you may need this:
        # r_1 = max(r_1, (img_h*s)/img_w)
        # r_2 = min(r_2, img_h / (img_w*s))
        r = random.uniform(*ratio)
        s = s * img_h * img_w
        w = int(math.sqrt(s / r))
        h = int(math.sqrt(s * r))
        left = random.randint(0, img_w - w)
        top = random.randint(0, img_h - h)

        return left, top, h, w, img_c 
Example #9
Source File: import_train_images.py    From L3C-PyTorch with GNU General Public License v3.0 6 votes
def random_resize(im, min_res: int, max_scale=_DEFAULT_MAX_SCALE):
    """Scale longer side to `min_res`, but only if that scales by <= max_scale."""
    W, H = im.size
    D = min(W, H)
    scale_min = min_res / D
    # Image is too small: even scaling by max_scale would not bring the shorter side up to min_res.
    if scale_min > max_scale:
        return None

    # Get a random scale for new size.
    scale = random.uniform(scale_min, max_scale)
    new_size = round(W * scale), round(H * scale)
    try:
        # Using LANCZOS!
        return im.resize(new_size, resample=PIL.Image.LANCZOS)
    except OSError as e:  # Happens for corrupted images
        print('*** Caught im.resize error', e)
        return None 
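A hedged usage sketch for random_resize() (the values below are illustrative, not from the project); it assumes PIL and random are imported as in the source file:

from PIL import Image

im = Image.new('RGB', (1024, 768))                    # placeholder image
out = random_resize(im, min_res=512, max_scale=1.5)   # returns None when min_res / min(W, H) > max_scale
if out is not None:
    print(out.size)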
Example #10
Source File: transforms.py    From DeepLung with GNU General Public License v3.0 6 votes
def __call__(self, img):
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert(img.size == (w, h))

                return img.resize((self.size, self.size), self.interpolation)

        # Fallback
        scale = Scale(self.size, interpolation=self.interpolation)
        crop = CenterCrop(self.size)
        return crop(scale(img)) 
Example #11
Source File: random_resized_crop.py    From argus-freesound with MIT License 6 votes
def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of the fraction of the original area to crop
            ratio (tuple): range of aspect ratios of the crop
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        area = img.size[0] * img.size[1]

        for attempt in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w 
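A possible way to exercise get_params() directly (a sketch; it assumes the method is reachable at module level or via the transform class, and the scale/ratio values are only placeholders):

from PIL import Image

img = Image.new('RGB', (640, 480))                                  # placeholder image
i, j, h, w = get_params(img, scale=(0.08, 1.0), ratio=(3/4, 4/3))   # top, left, height, width
patch = img.crop((j, i, j + w, i + h))                              # PIL crop box is (left, upper, right, lower)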
Example #12
Source File: opencv.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 6 votes
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
    """Randomly crop src with size. Randomize area and aspect ratio"""
    h, w, _ = src.shape
    area = w*h
    for _ in range(10):
        new_area = random.uniform(min_area, 1.0) * area
        new_ratio = random.uniform(*ratio)
        new_w = int(math.sqrt(new_area * new_ratio))  # w * h = new_area, w / h = new_ratio
        new_h = int(math.sqrt(new_area / new_ratio))

        if random.uniform(0., 1.) < 0.5:
            new_w, new_h = new_h, new_w

        if new_w > w or new_h > h:
            continue

        x0 = random.randint(0, w - new_w)
        y0 = random.randint(0, h - new_h)

        out = fixed_crop(src, x0, y0, new_w, new_h, size)
        return out, (x0, y0, new_w, new_h)

    return random_crop(src, size) 
Example #13
Source File: transforms.py    From argus-freesound with MIT License 6 votes
def spec_augment(spec: np.ndarray,
                 num_mask=2,
                 freq_masking=0.15,
                 time_masking=0.20,
                 value=0):
    spec = spec.copy()
    num_mask = random.randint(1, num_mask)
    for i in range(num_mask):
        all_freqs_num, all_frames_num  = spec.shape
        freq_percentage = random.uniform(0.0, freq_masking)

        num_freqs_to_mask = int(freq_percentage * all_freqs_num)
        f0 = np.random.uniform(low=0.0, high=all_freqs_num - num_freqs_to_mask)
        f0 = int(f0)
        spec[f0:f0 + num_freqs_to_mask, :] = value

        time_percentage = random.uniform(0.0, time_masking)

        num_frames_to_mask = int(time_percentage * all_frames_num)
        t0 = np.random.uniform(low=0.0, high=all_frames_num - num_frames_to_mask)
        t0 = int(t0)
        spec[:, t0:t0 + num_frames_to_mask] = value
    return spec 
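A short usage sketch for spec_augment() (the array below is a stand-in for a real mel spectrogram; shapes and masking settings are placeholders), assuming numpy and random are imported as in the source file:

spec = np.random.rand(128, 400).astype(np.float32)   # (freq bins, time frames) placeholder
masked = spec_augment(spec, num_mask=2, freq_masking=0.15, time_masking=0.20, value=0)
print(masked.shape)                                  # same shape; the masked bands are set to `value`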
Example #14
Source File: opsautomatortest_service.py    From aws-ops-automator with Apache License 2.0 5 votes
def describe(self, as_tuple=None, **kwargs):
        """
        This method retrieves test resources; the method parameters are only used for signature compatibility
        :param as_tuple: Set to true to return results as immutable named dictionaries instead of dictionaries
        :return: Test resource
        """

        def create_resource(r):
            return {
                actions.ops_automator_test_action.TEST_RESOURCE_ID: OpsautomatortestService.resource_id(r),
                "AwsAccount": self.aws_account,
                "Region": kwargs["region"] if "region" in kwargs else self.region,
                "Service": self.service_name,
                "ResourceTypeName": actions.ops_automator_test_action.TEST_RESOURCE_NAMES[0],
                "Tags": self.tags
            }

        start = datetime.now()

        self._args = kwargs
        result = [create_resource(i) for i in sorted(self._number_of_resources)]

        if self._args.get(actions.ops_automator_test_action.PARAM_TEST_SELECT_FAILING, False) in ["True", True]:
            raise Exception("Selection of resources fails")

        select_time = int(self._args.get(actions.ops_automator_test_action.PARAM_TEST_SELECT_DURATION, 0))

        if select_time != 0:
            variance = float(self._args.get(actions.ops_automator_test_action.PARAM_TEST_SELECT_DURATION_VARIANCE, 0))
            if variance != 0:
                select_time += (random.uniform(variance * -1, variance) * select_time)
            time_spend = (datetime.now() - start).total_seconds()
            if time_spend < select_time:
                time.sleep(select_time - time_spend)

        return result 
Example #15
Source File: data_augmentation.py    From Yolo-v2-pytorch with MIT License 5 votes
def __call__(self, data):
        image, label = data
        if uniform(0, 1) >= self.prob:
            image = cv2.flip(image, 1)
            width = image.shape[1]
            label = [[width - lb[2], lb[1], width - lb[0], lb[3], lb[4]] for lb in label]
        return image, label 
Example #16
Source File: train_deeplab3D.py    From pytorch-mri-segmentation-3D with MIT License 5 votes
def get_data_from_chunk_v2(chunk):

    main_folder_path = '../../Data/MS2017a/'
    scans_folder_path = main_folder_path + 'scans/'

    img_type_path = 'pre/FLAIR.nii.gz'
    gt_type_path = 'wmh.nii.gz'

    scale = random.uniform(0.5, 1.3)
    dim = int(scale*321)

    images = np.zeros((dim,dim, 1,len(chunk)))
    gt = np.zeros((dim,dim,1,len(chunk)))
    for i, piece in enumerate(chunk):
        print(os.path.join(main_folder_path, piece))
        img_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece))
        flip_p = random.uniform(0, 1)

        img_temp = cv2.resize(img_temp,(321,321)).astype(float)
        img_temp = img_temp.reshape([321, 321, 1])

        img_temp = scale_im(img_temp,scale)
        img_temp = flip(img_temp,flip_p)
        images[:,:,0,i] = img_temp

        piece_gt = piece.replace('slices', 'gt_slices').replace('FLAIR', 'wmh')
        gt_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece_gt), makebin = onlyLesions)
        gt_temp = cv2.resize(gt_temp,(321,321) , interpolation = cv2.INTER_NEAREST)
        gt_temp = gt_temp.reshape([321,321, 1])
        gt_temp = scale_gt(gt_temp,scale)
        gt_temp = flip(gt_temp,flip_p)

        gt[:,:,0,i] = gt_temp
        a = outS(321*scale)

    labels = [resize_label_batch(gt,i) for i in [a,a,a,a]]

    #from dim1 x dim2 x 1 x batch -> batch x 1 x dim1 x dim2
    images = images.transpose((3,2,0,1))
    images = torch.from_numpy(images).float()
    return images, labels 
Example #17
Source File: utils.py    From overhaul-distillation with MIT License 5 votes
def __call__(self, img):
        gs = img.new().resize_as_(img).zero_()
        alpha = random.uniform(-self.var, self.var)
        return img.lerp(gs, alpha) 
Example #18
Source File: utils.py    From ICDAR-2019-SROIE with MIT License 5 votes
def expand(image, boxes, filler):
    """
    Perform a zooming out operation by placing the image in a larger canvas of filler material.

    Helps the model learn to detect smaller objects.

    :param image: image, a tensor of dimensions (3, original_h, original_w)
    :param boxes: bounding boxes in boundary coordinates, a tensor of dimensions (n_objects, 4)
    :param filler: RGB values of the filler material, a list like [R, G, B]
    :return: expanded image, updated bounding box coordinates
    """
    # Calculate dimensions of proposed expanded (zoomed-out) image
    original_h = image.size(1)
    original_w = image.size(2)
    max_scale = 4
    scale = random.uniform(1, max_scale)
    new_h = int(scale * original_h)
    new_w = int(scale * original_w)

    # Create such an image with the filler
    filler = torch.FloatTensor(filler)  # (3)
    new_image = torch.ones((3, new_h, new_w), dtype=torch.float) * filler.unsqueeze(1).unsqueeze(1)  # (3, new_h, new_w)
    # Note - do not use expand() like new_image = filler.unsqueeze(1).unsqueeze(1).expand(3, new_h, new_w)
    # because all expanded values will share the same memory, so changing one pixel will change all

    # Place the original image at random coordinates in this new image (origin at top-left of image)
    left = random.randint(0, new_w - original_w)
    right = left + original_w
    top = random.randint(0, new_h - original_h)
    bottom = top + original_h
    new_image[:, top:bottom, left:right] = image

    # Adjust bounding boxes' coordinates accordingly
    new_boxes = boxes + torch.FloatTensor([left, top, left, top]).unsqueeze(
        0)  # (n_objects, 4), n_objects is the no. of objects in this image

    return new_image, new_boxes 
Example #19
Source File: utils.py    From ICDAR-2019-SROIE with MIT License 5 votes
def photometric_distort(image):
    """
    Distort brightness, contrast, saturation, and hue, each with a 50% chance, in random order.

    :param image: image, a PIL Image
    :return: distorted image
    """
    new_image = image

    distortions = [FT.adjust_brightness,
                   FT.adjust_contrast,
                   FT.adjust_saturation,
                   FT.adjust_hue]

    random.shuffle(distortions)

    for d in distortions:
        if random.random() < 0.5:
            if d.__name__ == 'adjust_hue':
                # Caffe repo uses a 'hue_delta' of 18 - we divide by 255 because PyTorch needs a normalized value
                adjust_factor = random.uniform(-18 / 255., 18 / 255.)
            else:
                # Caffe repo uses 'lower' and 'upper' values of 0.5 and 1.5 for brightness, contrast, and saturation
                adjust_factor = random.uniform(0.5, 1.5)

            # Apply this distortion
            new_image = d(new_image, adjust_factor)

    return new_image 
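A usage sketch for photometric_distort() (not from the repository), assuming torchvision.transforms.functional is imported as FT in the source file; the solid-colour image is only a placeholder:

from PIL import Image

img = Image.new('RGB', (64, 64), (128, 128, 128))   # placeholder PIL image
distorted = photometric_distort(img)                # random brightness/contrast/saturation/hue jitter
print(distorted.size)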
Example #20
Source File: utils.py    From overhaul-distillation with MIT License 5 votes
def __call__(self, img):
        gs = Grayscale()(img)
        alpha = random.uniform(-self.var, self.var)
        return img.lerp(gs, alpha) 
Example #21
Source File: custom_transforms.py    From overhaul-distillation with MIT License 5 votes
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        rotate_degree = random.uniform(-1*self.degree, self.degree)
        img = img.rotate(rotate_degree, Image.BILINEAR)
        mask = mask.rotate(rotate_degree, Image.NEAREST)

        return {'image': img,
                'label': mask} 
Example #22
Source File: preprocess.py    From PSMNet with MIT License 5 votes
def __call__(self, img):
        gs = Grayscale()(img)
        alpha = random.uniform(0, self.var)
        return img.lerp(gs, alpha) 
Example #23
Source File: __init__.py    From aws-ops-automator with Apache License 2.0 5 votes
def _apply_randomness(value, random_factor):
    """
    Applies a random factor to the value
    :param value: Input value
    :param random_factor: Random factor, must be between 0 (no random) and 1 (output is between 0 and 2* value)
    :return: Value with random factor applied
    """
    if random_factor < 0 or random_factor > 1:
        raise ValueError("Random factor must be in range 0 to 1")
    return value + (random.uniform(random_factor * -1, random_factor) * value) if random_factor != 0 else value 
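For example (values are illustrative only), a random_factor of 0.25 applied to an input of 100 yields a result somewhere in roughly [75, 125]:

jittered = _apply_randomness(100, 0.25)   # 100 + uniform(-0.25, 0.25) * 100
print(jittered)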
Example #24
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 5 votes
def get_params(mean, std):
        """Get parameters for gaussian noise
        Returns:
            sequence: params (mean, std) to be used for the gaussian noise
        """
        mean = random.uniform(0, mean)
        std = random.uniform(0, std)

        return mean, std 
Example #25
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 5 votes
def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for affine transformation

        Returns:
            sequence: params to be passed to the affine transformation
        """
        angle = random.uniform(degrees[0], degrees[1])
        if translate is not None:
            max_dx = translate[0] * img_size[1]
            max_dy = translate[1] * img_size[0]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = random.uniform(scale_ranges[0], scale_ranges[1])
        else:
            scale = 1.0

        if shears is not None:
            shear = random.uniform(shears[0], shears[1])
        else:
            shear = 0.0

        return angle, translations, scale, shear 
Example #26
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 5 votes
def get_params(degrees):
        """Get parameters for ``rotate`` for a random rotation.

        Returns:
            sequence: params to be passed to ``rotate`` for random rotation.
        """
        angle = random.uniform(degrees[0], degrees[1])

        return angle 
Example #27
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 5 votes
def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (CV Image): Image to be cropped.
            scale (tuple): range of the fraction of the original area to crop
            ratio (tuple): range of aspect ratios of the crop

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        area = img.shape[0] * img.shape[1]

        for attempt in range(10):
            target_area = random.uniform(*scale) * area
            aspect_ratio = random.uniform(*ratio)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if w <= img.shape[1] and h <= img.shape[0]:
                i = random.randint(0, img.shape[0] - h)
                j = random.randint(0, img.shape[1] - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = img.shape[1] / img.shape[0]
        if (in_ratio < min(ratio)):
            w = img.shape[1]
            h = int(round(w / min(ratio)))
        elif (in_ratio > max(ratio)):
            h = img.shape[0]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.shape[1]
            h = img.shape[0]
        i = (img.shape[0] - h) // 2
        j = (img.shape[1] - w) // 2
        return i, j, h, w 
Example #28
Source File: transforms.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 5 votes
def get_params(
            fov_range,
            anglex_ranges,
            angley_ranges,
            anglez_ranges,
            shear_ranges,
            translate,
            scale_ranges,
            img_size):
        """Get parameters for ``perspective`` for a random perspective transform.

        Returns:
            sequence: params to be passed to the perspective transformation
        """
        fov = 90 + random.uniform(-fov_range, fov_range)
        anglex = random.uniform(anglex_ranges[0], anglex_ranges[1])
        angley = random.uniform(angley_ranges[0], angley_ranges[1])
        anglez = random.uniform(anglez_ranges[0], anglez_ranges[1])
        shear = random.uniform(shear_ranges[0], shear_ranges[1])

        max_dx = translate[0] * img_size[1]
        max_dy = translate[1] * img_size[0]
        translations = (np.round(random.uniform(-max_dx, max_dx)),
                        np.round(random.uniform(-max_dy, max_dy)))

        scale = (random.uniform(1 / scale_ranges[0], scale_ranges[0]),
                 random.uniform(1 / scale_ranges[1], scale_ranges[1]))

        return fov, anglex, angley, anglez, shear, translations, scale 
Example #29
Source File: transforms.py    From tsn-pytorch with BSD 2-Clause "Simplified" License 5 votes
def __call__(self, img_group):
        for attempt in range(10):
            area = img_group[0].size[0] * img_group[0].size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
                x1 = random.randint(0, img_group[0].size[0] - w)
                y1 = random.randint(0, img_group[0].size[1] - h)
                found = True
                break
        else:
            found = False
            x1 = 0
            y1 = 0

        if found:
            out_group = list()
            for img in img_group:
                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert(img.size == (w, h))
                out_group.append(img.resize((self.size, self.size), self.interpolation))
            return out_group
        else:
            # Fallback
            scale = GroupScale(self.size, interpolation=self.interpolation)
            crop = GroupRandomCrop(self.size)
            return crop(scale(img_group)) 
Example #30
Source File: segregation.py    From indras_net with GNU General Public License v3.0 5 votes
def __init__(self, name, goal, min_tol, max_tol, max_move=100,
                 max_detect=1):
        super().__init__(name, goal, max_move=max_move, max_detect=max_detect)
        self.tolerance = random.uniform(max_tol, min_tol)
        self.stance = None
        self.orientation = None
        self.visible_pre = None