Python utils.image.resize() Examples
The following are 7 code examples of utils.image.resize(), drawn from open source projects. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the utils.image module.
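Before the full examples, here is a minimal, hedged sketch of the call as it appears in the BirdCLEF-Baseline examples below: image.resize() takes an image array plus a target width, height, and a mode keyword (cfg.RESIZE_MODE in the examples). The array shape and the 'squeeze' mode value here are illustrative assumptions, not part of a documented API.

import numpy as np
from utils import image

# A fake single-channel spectrogram; real inputs come from the project's audio/image utilities.
spec = np.random.rand(64, 384).astype('float32')

# Resize to the network input size (width, height); the mode value is an assumed example.
spec = image.resize(spec, 512, 256, mode='squeeze')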
Example #1
Source File: batch_generator.py From BirdCLEF-Baseline with MIT License
def loadImageAndTarget(sample, augmentation):

    # Load image
    img = image.openImage(sample[0], cfg.IM_DIM)

    # Resize Image
    img = image.resize(img, cfg.IM_SIZE[0], cfg.IM_SIZE[1], mode=cfg.RESIZE_MODE)

    # Do image Augmentation
    if augmentation:
        img = image.augment(img, cfg.IM_AUGMENTATION, cfg.AUGMENTATION_COUNT, cfg.AUGMENTATION_PROBABILITY)

    # Prepare image for net input
    img = image.normalize(img, cfg.ZERO_CENTERED_NORMALIZATION)
    img = image.prepare(img)

    # Get target
    label = sample[1]
    index = cfg.CLASSES.index(label)
    target = np.zeros((len(cfg.CLASSES)), dtype='float32')
    target[index] = 1.0

    return img, target

#################### BATCH HANDLING #####################
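As a hedged usage sketch (not taken from the repository), the loader above could be driven like this to assemble a training batch. The samples argument, a list of (path, label) tuples, is hypothetical, and the assumption that image.prepare() returns an array with a leading batch axis is mine.

def getBatch(samples, augmentation=True):

    # Stack prepared images and one-hot targets into batch arrays
    x, y = [], []
    for sample in samples:
        img, target = loadImageAndTarget(sample, augmentation)
        x.append(img)
        y.append(target)

    return np.vstack(x), np.vstack(y)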
Example #2
Source File: submission_soundscape.py From BirdCLEF-Baseline with MIT License
def getSpecBatches(split):

    # Random Seed
    random = cfg.getRandomState()

    # Make predictions for every testfile
    for t in split:

        # Spec batch
        spec_batch = []

        # Keep track of timestamps
        pred_start = 0

        # Get specs for file
        for spec in audio.specsFromFile(t[0],
                                        cfg.SAMPLE_RATE,
                                        cfg.SPEC_LENGTH,
                                        cfg.SPEC_OVERLAP,
                                        cfg.SPEC_MINLEN,
                                        shape=(cfg.IM_SIZE[1], cfg.IM_SIZE[0]),
                                        fmin=cfg.SPEC_FMIN,
                                        fmax=cfg.SPEC_FMAX):

            # Resize spec
            spec = image.resize(spec, cfg.IM_SIZE[0], cfg.IM_SIZE[1], mode=cfg.RESIZE_MODE)

            # Normalize spec
            spec = image.normalize(spec, cfg.ZERO_CENTERED_NORMALIZATION)

            # Prepare as input
            spec = image.prepare(spec)

            # Add to batch
            if len(spec_batch) > 0:
                spec_batch = np.vstack((spec_batch, spec))
            else:
                spec_batch = spec

            # Batch too large?
            if spec_batch.shape[0] >= cfg.MAX_SPECS_PER_FILE:
                break

            # Do we have enough specs for a prediction?
            if len(spec_batch) >= cfg.SPECS_PER_PREDICTION:

                # Calculate next timestamp
                pred_end = pred_start + cfg.SPEC_LENGTH + ((len(spec_batch) - 1) * (cfg.SPEC_LENGTH - cfg.SPEC_OVERLAP))

                # Store prediction
                ts = getTimestamp(int(pred_start), int(pred_end))

                # Advance to next timestamp
                pred_start = pred_end - cfg.SPEC_OVERLAP

                yield spec_batch, t[1], ts, t[0].split(os.sep)[-1]

                # Spec batch
                spec_batch = []
Example #3
Source File: test.py From BirdCLEF-Baseline with MIT License
def getSpecBatches(split):

    # Random Seed
    random = cfg.getRandomState()

    # Make predictions for every testfile
    for t in split:

        # Spec batch
        spec_batch = []

        # Get specs for file
        for spec in audio.specsFromFile(t[0],
                                        cfg.SAMPLE_RATE,
                                        cfg.SPEC_LENGTH,
                                        cfg.SPEC_OVERLAP,
                                        cfg.SPEC_MINLEN,
                                        shape=(cfg.IM_SIZE[1], cfg.IM_SIZE[0]),
                                        fmin=cfg.SPEC_FMIN,
                                        fmax=cfg.SPEC_FMAX,
                                        spec_type=cfg.SPEC_TYPE):

            # Resize spec
            spec = image.resize(spec, cfg.IM_SIZE[0], cfg.IM_SIZE[1], mode=cfg.RESIZE_MODE)

            # Normalize spec
            spec = image.normalize(spec, cfg.ZERO_CENTERED_NORMALIZATION)

            # Prepare as input
            spec = image.prepare(spec)

            # Add to batch
            if len(spec_batch) > 0:
                spec_batch = np.vstack((spec_batch, spec))
            else:
                spec_batch = spec

            # Batch too large?
            if spec_batch.shape[0] >= cfg.MAX_SPECS_PER_FILE:
                break

        # No specs?
        if len(spec_batch) == 0:
            spec = random.normal(0.0, 1.0, (cfg.IM_SIZE[1], cfg.IM_SIZE[0]))
            spec_batch = image.prepare(spec)

        # Shuffle spec batch
        spec_batch = shuffle(spec_batch, random_state=random)

        # yield batch, labels and filename
        yield spec_batch[:cfg.MAX_SPECS_PER_FILE], t[1], t[0].split(os.sep)[-1]
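For context, a hedged sketch of how a caller might consume the generator in Example #3; TEST, the list of (path, labels) tuples, is a hypothetical stand-in for the project's test split, and the per-batch prediction step is omitted because the model call is project-specific.

# Iterate over one batch of prepared spectrograms per test file (matches the yield in Example #3).
for spec_batch, labels, filename in getSpecBatches(TEST):
    # spec_batch: prepared spectrograms, capped at cfg.MAX_SPECS_PER_FILE
    # labels:     ground-truth labels for the file
    # filename:   base name of the audio file
    print(filename, spec_batch.shape, len(labels))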
Example #4
Source File: deform_conv_demo.py From kaggle-rsna18 with MIT License
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)

    # load demo data
    image_names = ['000240.jpg', '000437.jpg', '004072.jpg', '007912.jpg']
    image_all = []
    data = []
    for im_name in image_names:
        assert os.path.exists(cur_path + '/../demo/deform_conv/' + im_name), \
            ('{} does not exist'.format('../demo/deform_conv/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_conv/' + im_name,
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
        data.append({'data': im_tensor, 'im_info': im_info})

    # get predictor
    data_names = ['data', 'im_info']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_conv', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        res5a_offset = output[0]['res5a_branch2b_offset_output'].asnumpy()
        res5b_offset = output[0]['res5b_branch2b_offset_output'].asnumpy()
        res5c_offset = output[0]['res5c_branch2b_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_dconv_offset(im, [res5c_offset, res5b_offset, res5a_offset])
Example #5
Source File: deform_psroi_demo.py From kaggle-rsna18 with MIT License
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol_rfcn(config, is_train=False)

    # load demo data
    image_names = ['000057.jpg', '000149.jpg', '000351.jpg', '002535.jpg']
    image_all = []

    # ground truth boxes
    gt_boxes_all = [np.array([[132, 52, 384, 357]]),
                    np.array([[113, 1, 350, 360]]),
                    np.array([[0, 27, 329, 155]]),
                    np.array([[8, 40, 499, 289]])]
    gt_classes_all = [np.array([3]), np.array([16]), np.array([7]), np.array([12])]
    data = []
    for idx, im_name in enumerate(image_names):
        assert os.path.exists(cur_path + '/../demo/deform_psroi/' + im_name), \
            ('{} does not exist'.format('../demo/deform_psroi/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_psroi/' + im_name,
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        gt_boxes = gt_boxes_all[idx]
        gt_boxes = np.round(gt_boxes * im_scale)
        data.append({'data': im_tensor, 'rois': np.hstack((np.zeros((gt_boxes.shape[0], 1)), gt_boxes))})

    # get predictor
    data_names = ['data', 'rois']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_psroi', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        cls_offset = output[0]['rfcn_cls_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        boxes = gt_boxes_all[idx]
        show_dpsroi_offset(im, boxes, cls_offset, gt_classes_all[idx])
Example #6
Source File: deform_conv_demo.py From Deformable-ConvNets with MIT License
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)

    # load demo data
    image_names = ['000240.jpg', '000437.jpg', '004072.jpg', '007912.jpg']
    image_all = []
    data = []
    for im_name in image_names:
        assert os.path.exists(cur_path + '/../demo/deform_conv/' + im_name), \
            ('{} does not exist'.format('../demo/deform_conv/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_conv/' + im_name,
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
        data.append({'data': im_tensor, 'im_info': im_info})

    # get predictor
    data_names = ['data', 'im_info']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_conv', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        res5a_offset = output[0]['res5a_branch2b_offset_output'].asnumpy()
        res5b_offset = output[0]['res5b_branch2b_offset_output'].asnumpy()
        res5c_offset = output[0]['res5c_branch2b_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_dconv_offset(im, [res5c_offset, res5b_offset, res5a_offset])
Example #7
Source File: deform_psroi_demo.py From Deformable-ConvNets with MIT License
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol_rfcn(config, is_train=False)

    # load demo data
    image_names = ['000057.jpg', '000149.jpg', '000351.jpg', '002535.jpg']
    image_all = []

    # ground truth boxes
    gt_boxes_all = [np.array([[132, 52, 384, 357]]),
                    np.array([[113, 1, 350, 360]]),
                    np.array([[0, 27, 329, 155]]),
                    np.array([[8, 40, 499, 289]])]
    gt_classes_all = [np.array([3]), np.array([16]), np.array([7]), np.array([12])]
    data = []
    for idx, im_name in enumerate(image_names):
        assert os.path.exists(cur_path + '/../demo/deform_psroi/' + im_name), \
            ('{} does not exist'.format('../demo/deform_psroi/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_psroi/' + im_name,
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        gt_boxes = gt_boxes_all[idx]
        gt_boxes = np.round(gt_boxes * im_scale)
        data.append({'data': im_tensor, 'rois': np.hstack((np.zeros((gt_boxes.shape[0], 1)), gt_boxes))})

    # get predictor
    data_names = ['data', 'rois']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_psroi', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        cls_offset = output[0]['rfcn_cls_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        boxes = gt_boxes_all[idx]
        show_dpsroi_offset(im, boxes, cls_offset, gt_classes_all[idx])
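Note that in Examples #4 to #7 the resize() helper is not the BirdCLEF utility but the image utility bundled with Deformable-ConvNets and kaggle-rsna18: it scales the image so the shorter side approaches target_size without letting the longer side exceed max_size, optionally pads to a multiple of stride, and returns both the resized image and the scale factor. Below is a rough, hedged re-implementation of that scaling logic (padding omitted); the exact rounding and padding behavior of the original helper may differ.

import cv2
import numpy as np

def resize_sketch(im, target_size, max_size):
    # Scale the shorter side to target_size, capped so the longer side stays <= max_size.
    im_shape = im.shape
    size_min = np.min(im_shape[0:2])
    size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(size_min)
    if np.round(im_scale * size_max) > max_size:
        im_scale = float(max_size) / float(size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale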