Python tensorpack.imgaug.Flip() Examples
The following are 10 code examples of tensorpack.imgaug.Flip().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorpack.imgaug, or try the search function.
Example #1
Source File: imagenet_utils.py From LQ-Nets with MIT License | 6 votes |
def normal_augmentor(isTrain):
    """Minimal augmentor for BGR images in range [0, 255]: resize + crop,
    plus a random horizontal flip when training.

    Args:
        isTrain (bool): True builds the training pipeline (random crop +
            flip), False builds the eval pipeline (center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    crop_shape = (DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)
    resize = imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC)
    if isTrain:
        return [
            resize,
            imgaug.RandomCrop(crop_shape),
            imgaug.Flip(horiz=True),
        ]
    return [
        resize,
        imgaug.CenterCrop(crop_shape),
    ]
Example #2
Source File: data_loader.py From ADL with MIT License | 6 votes |
def fbresnet_augmentor(is_training, option):
    """Resize-then-crop augmentor list, with a horizontal flip for training.

    Images are converted to float32 for the intermediate steps and back to
    uint8 at the end of the pipeline.

    Args:
        is_training (bool): selects the train or eval pipeline.
        option: config object; only ``option.final_size`` (int) is read.

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    resize_shape = (option.final_size + 32, option.final_size + 32)
    crop_shape = (option.final_size, option.final_size)
    if is_training:
        return [
            imgaug.ToFloat32(),
            imgaug.Resize(resize_shape),
            imgaug.RandomCrop(crop_shape),
            imgaug.Flip(horiz=True),
            imgaug.ToUint8(),
        ]
    return [
        imgaug.ToFloat32(),
        imgaug.Resize(resize_shape),
        imgaug.CenterCrop(crop_shape),
        imgaug.ToUint8(),
    ]
Example #3
Source File: imagenet_utils.py From ghostnet with Apache License 2.0 | 5 votes |
def fbresnet_augmentor(isTrain):
    """Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].

    Args:
        isTrain (bool): True builds the training pipeline (GoogleNet-style
            crop + color jitter + flip), False builds the eval pipeline
            (shortest-edge resize + 224x224 center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    if not isTrain:
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    # It's OK to remove the color jitter if your CPU is not fast enough.
    # Dropping brightness/contrast/saturation barely affects accuracy;
    # dropping lighting costs a tiny accuracy drop.
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), clip=False),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        GoogleNetResize(),
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
Example #4
Source File: imagenet_utils.py From benchmarks with The Unlicense | 5 votes |
def fbresnet_augmentor(isTrain):
    """Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].

    Paper, Sec 5.1: "We use scale and aspect ratio data augmentation [35]
    as in [12]. The network input image is a 224x224 pixel random crop from
    an augmented image or its horizontal flip."

    Args:
        isTrain (bool): True builds the training pipeline, False builds the
            eval pipeline (shortest-edge resize + 224x224 center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    interp = cv2.INTER_LINEAR
    if not isTrain:
        return [
            imgaug.ResizeShortestEdge(256, interp=interp),
            imgaug.CenterCrop((224, 224)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    # It's OK to remove the color jitter if your CPU is not fast enough.
    # Dropping brightness/contrast/saturation barely affects accuracy;
    # dropping lighting costs a tiny accuracy drop.
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), rgb=False, clip=False),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        imgaug.GoogleNetRandomCropAndResize(interp=interp),
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
Example #5
Source File: data_feeder.py From tf-lcnn with GNU General Public License v3.0 | 5 votes |
def get_ilsvrc_data_alexnet(is_train, image_size, batchsize, directory):
    """Build the ILSVRC dataflow (augmented, prefetched, batched).

    Args:
        is_train (bool): selects the train or validation pipeline.
        image_size: unused here; kept for interface compatibility.
        batchsize (int): batch size passed to BatchData.
        directory (str): an absolute path loads the full ILSVRC12 dataset;
            any other value loads the "tenth" subset loaders.

    Returns:
        A tensorpack DataFlow yielding batches of augmented images.
    """
    # NOTE(review): `ILSVRCTTenthTrain` (double T) differs from
    # `ILSVRCTenthValid` below -- looks like a typo; verify against the
    # project before renaming.
    if not is_train:
        if directory.startswith('/'):
            ds = ILSVRC12(directory, 'val')
        else:
            ds = ILSVRCTenthValid(directory)
        ds = AugmentImageComponent(ds, [
            imgaug.ResizeShortestEdge(224, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ])
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count())
        return BatchData(ds, batchsize)

    if directory.startswith('/'):
        ds = ILSVRC12(directory, 'train')
    else:
        ds = ILSVRCTTenthTrain(directory)
    # Randomly pick either salt-and-pepper noise or a color jitter.
    noise_or_color = imgaug.RandomChooseAug([
        imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
        imgaug.RandomOrderAug([
            imgaug.BrightnessScale((0.8, 1.2), clip=False),
            imgaug.Contrast((0.8, 1.2), clip=False),
            # imgaug.Saturation(0.4, rgb=True),
        ]),
    ])
    train_augs = [
        imgaug.RandomApplyAug(imgaug.RandomResize((0.9, 1.2), (0.9, 1.2)), 0.7),
        imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.7),
        imgaug.RandomApplyAug(noise_or_color, 0.7),
        imgaug.Flip(horiz=True),
        imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
        imgaug.RandomCrop((224, 224)),
    ]
    ds = AugmentImageComponent(ds, train_augs)
    ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())
    ds = BatchData(ds, batchsize)
    return PrefetchData(ds, 10, 4)
Example #6
Source File: imagenet_utils.py From webvision-2.0-benchmarks with Apache License 2.0 | 5 votes |
def fbresnet_augmentor(isTrain):
    """Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].

    Args:
        isTrain (bool): True builds the training pipeline (GoogleNet-style
            crop + color jitter + flip), False builds the eval pipeline
            (shortest-edge resize + 224x224 center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    if not isTrain:
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), clip=False),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        GoogleNetResize(),
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
Example #7
Source File: imagenet_utils.py From LQ-Nets with MIT License | 5 votes |
def fbresnet_augmentor(isTrain):
    """Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].

    Args:
        isTrain (bool): True builds the training pipeline (GoogleNet-style
            crop + color jitter + flip), False builds the eval pipeline
            (shortest-edge resize + DEFAULT_IMAGE_SHAPE center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    if not isTrain:
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((DEFAULT_IMAGE_SHAPE, DEFAULT_IMAGE_SHAPE)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    # It's OK to remove the color jitter if your CPU is not fast enough.
    # Dropping brightness/contrast/saturation barely affects accuracy;
    # dropping lighting costs a tiny accuracy drop.
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), clip=False),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        GoogleNetResize(),
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
Example #8
Source File: data.py From sequential-imagenet-dataloader with MIT License | 5 votes |
def fbresnet_augmentor(isTrain):
    """Augmentor used in fb.resnet.torch, for BGR images in range [0, 255].

    Args:
        isTrain (bool): True builds the training pipeline (GoogleNet-style
            crop + color jitter + flip), False builds the eval pipeline
            (shortest-edge resize + 224x224 center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    if not isTrain:
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=False),
        imgaug.Contrast((0.6, 1.4), clip=False),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        GoogleNetResize(),
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
#####################################################################################################
#####################################################################################################
Example #9
Source File: data_helper.py From tf-mobilenet-v2 with MIT License | 5 votes |
def get_augmentations(is_train):
    """Augmentor list for MobileNet-v2 style training on BGR images.

    Args:
        is_train (bool): True builds the training pipeline (GoogleNet-style
            crop + clipped color jitter + flip), False builds the eval
            pipeline (shortest-edge resize + 224x224 center crop).

    Returns:
        list: tensorpack imgaug augmentor instances, applied in order.
    """
    if not is_train:
        return [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224)),
        ]
    # PCA-lighting constants copied from fb.resnet.torch; the [::-1]
    # reversals flip them from RGB order to BGR.
    eigval = np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0
    eigvec = np.array(
        [[-0.5675, 0.7192, 0.4009],
         [-0.5808, -0.0045, -0.8140],
         [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1]
    color_jitter = imgaug.RandomOrderAug([
        imgaug.BrightnessScale((0.6, 1.4), clip=True),
        imgaug.Contrast((0.6, 1.4), clip=True),
        imgaug.Saturation(0.4, rgb=False),
        imgaug.Lighting(0.1, eigval=eigval, eigvec=eigvec),
    ])
    return [
        GoogleNetResize(crop_area_fraction=0.76, target_shape=224),  # TODO : 76% or 49%?
        color_jitter,
        imgaug.Flip(horiz=True),
    ]
Example #10
Source File: config.py From hover_net with MIT License | 4 votes |
def get_train_augmentors(self, input_shape, output_shape, view=False):
    """Assemble the three training-time augmentor lists for this config.

    Args:
        input_shape: crop shape for the network input image.
        output_shape: crop shape for the target/label maps.
        view (bool): when True, the final label center-crop is skipped so
            the full-size targets can be inspected.

    Returns:
        tuple: (shape_augs, input_augs, label_augs) lists of augmentors.
    """
    print(input_shape, output_shape)
    # Geometric augs, applied jointly to image and label.
    shape_augs = [
        imgaug.Affine(
            shear=5,  # in degree
            scale=(0.8, 1.2),
            rotate_max_deg=179,
            translate_frac=(0.01, 0.01),
            interp=cv2.INTER_NEAREST,
            border=cv2.BORDER_CONSTANT),
        imgaug.Flip(vert=True),
        imgaug.Flip(horiz=True),
        imgaug.CenterCrop(input_shape),
    ]
    # Photometric augs, applied to the image only.
    blur_or_noise = imgaug.RandomChooseAug([
        GaussianBlur(),
        MedianBlur(),
        imgaug.GaussianNoise(),
    ])
    input_augs = [
        imgaug.RandomApplyAug(blur_or_noise, 0.5),
        # standard color augmentation
        imgaug.RandomOrderAug([
            imgaug.Hue((-8, 8), rgb=True),
            imgaug.Saturation(0.2, rgb=True),
            imgaug.Brightness(26, clip=True),
            imgaug.Contrast((0.75, 1.25), clip=True),
        ]),
        imgaug.ToUint8(),
    ]
    # Target-map generation depends on which model head is configured.
    model_type = self.model_type
    if model_type in ('unet', 'micronet'):
        label_augs = [GenInstanceUnetMap(crop_shape=output_shape)]
    elif model_type == 'dcan':
        label_augs = [GenInstanceContourMap(crop_shape=output_shape)]
    elif model_type == 'dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=False)]
    elif model_type == 'np_hv':
        label_augs = [GenInstanceHV(crop_shape=output_shape)]
    elif model_type == 'np_dist':
        label_augs = [GenInstanceDistance(crop_shape=output_shape, inst_norm=True)]
    else:
        label_augs = []
    if not self.type_classification:
        label_augs.append(BinarizeLabel())
    if not view:
        label_augs.append(imgaug.CenterCrop(output_shape))
    return shape_augs, input_augs, label_augs