Python utils.load_image() Examples

The following are 14 code examples of utils.load_image(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
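The exact behavior of utils.load_image varies from project to project: some variants return a PIL image, others a NumPy array, and the accepted keyword arguments (max_size, scale, a fixed size, ...) differ accordingly. As a rough orientation only, here is a minimal sketch of such a helper; the resizing logic, return type, and signature are assumptions for illustration, not the implementation used by any repository below.

import numpy as np
from PIL import Image

def load_image(path, max_size=None):
    """Hypothetical loader: read an image as RGB, optionally capping its longest side."""
    img = Image.open(path).convert('RGB')
    if max_size is not None and max(img.size) > max_size:
        ratio = max_size / float(max(img.size))  # preserve the aspect ratio
        img = img.resize((int(img.size[0] * ratio), int(img.size[1] * ratio)), Image.LANCZOS)
    return np.asarray(img, dtype=np.float32)  # array of shape (height, width, 3)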
Example #1
Source File: stylize_image.py    From fast-style-transfer with GNU General Public License v3.0
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    network = options.network_path
    if not os.path.isdir(network):
        parser.error("Network %s does not exist." % network)

    content_image = utils.load_image(options.content)
    reshaped_content_height = (content_image.shape[0] - content_image.shape[0] % 4)
    reshaped_content_width = (content_image.shape[1] - content_image.shape[1] % 4)
    reshaped_content_image = content_image[:reshaped_content_height, :reshaped_content_width, :]
    reshaped_content_image = reshaped_content_image.reshape((1,) + reshaped_content_image.shape)

    prediction = ffwd(reshaped_content_image, network)
    utils.save_image(prediction, options.output_path) 
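The crop to a multiple of 4 in this example exists because feed-forward style-transfer networks typically downsample the input twice with stride-2 convolutions and then upsample it back, so both spatial dimensions must be divisible by 4 for the output to line up with the input. A hypothetical general-purpose helper for the same pattern (crop_to_multiple is not part of the project; it just names the idiom):

def crop_to_multiple(image, factor=4):
    """Crop an HxWxC NumPy array so both spatial dimensions are divisible by factor."""
    h = image.shape[0] - image.shape[0] % factor
    w = image.shape[1] - image.shape[1] % factor
    return image[:h, :w, :]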
Example #2
Source File: train_network.py    From fast-style-transfer with GNU General Public License v3.0
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_image = utils.load_image(options.style)
    style_image = style_image.reshape((1,) + style_image.shape)

    content_targets = utils.get_files(options.train_path)
    content_shape = utils.load_image(content_targets[0]).shape

    device = '/gpu:0' if options.use_gpu else '/cpu:0'

    style_transfer = FastStyleTransfer(
        vgg_path=VGG_PATH,
        style_image=style_image,
        content_shape=content_shape,
        content_weight=options.content_weight,
        style_weight=options.style_weight,
        tv_weight=options.tv_weight,
        batch_size=options.batch_size,
        device=device)

    for iteration, network, first_image, losses in style_transfer.train(
        content_training_images=content_targets,
        learning_rate=options.learning_rate,
        epochs=options.epochs,
        checkpoint_iterations=options.checkpoint_iterations
    ):
        print_losses(losses)

        if iteration % 100 == 0:
            saver = tf.train.Saver()
            saver.save(network, options.save_path + '/fast_style_network.ckpt')

    # save the final network once training completes
    saver = tf.train.Saver()
    saver.save(network, options.save_path + '/fast_style_network.ckpt')
Example #3
Source File: fast_style_transfer.py    From fast-style-transfer with GNU General Public License v3.0
def _load_batch(self, image_paths):
        return np.array([utils.load_image(img_path) for img_path in image_paths])
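Note that np.array only yields a dense 4-D batch tensor here if every image in the batch has the same shape; with mixed sizes, NumPy falls back to a ragged object array (or raises an error in recent versions), so this pattern assumes the images have already been resized to a common resolution.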
Example #4
Source File: feature_extraction.py    From intermediate-cnn-features with Apache License 2.0
def feature_extraction_images(model, cores, batch_sz, image_list, output_path):
    """
      Function that extracts the intermediate CNN features
      of each image in a provided image list.

      Args:
        model: CNN network
        cores: number of CPU cores used for parallel image loading
        batch_sz: batch size fed to the CNN network
        image_list: path to a file listing the images to extract features from
        output_path: directory in which to store the extracted features
    """
    images = [image.strip() for image in open(image_list).readlines()]
    print('\nNumber of images: ', len(images))
    print('Storage directory: ', output_path)
    print('CPU cores: ', cores)
    print('Batch size: ', batch_sz)

    print('\nFeature Extraction Process')
    print('==========================')
    pool = Pool(cores)
    batches = len(images) // batch_sz + 1
    features = np.zeros((len(images), model.final_sz))
    for batch in tqdm(range(batches), mininterval=1.0, unit='batches'):

        # load images in parallel
        future = []
        for image in images[batch * batch_sz: (batch+1) * batch_sz]:
            future += [pool.apply_async(load_image, args=[image, model.desired_size])]

        image_tensor = []
        for f in future:
            image_tensor += [f.get()]

        # extract features
        features[int(batch * batch_sz): int((batch + 1) * batch_sz)] = \
            model.extract(np.array(image_tensor), batch_sz)

    # save features
    np.save(os.path.join(output_path, '{}_features'.format(model.net_name)), features) 
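One caveat with this example: the Pool is never shut down, so the worker processes linger until the interpreter exits. A sketch of the cleanup that would normally follow the loop, assuming no further batches need to be processed:

pool.close()  # stop accepting new tasks
pool.join()   # wait for the worker processes to terminate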
Example #5
Source File: run_test.py    From tensorflow-fast-style-transfer with Apache License 2.0
def main():

    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # load content image
    content_image = utils.load_image(args.content, max_size=args.max_size)

    # open session
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True # to deal with large images
    sess = tf.Session(config=soft_config)

    # build the graph
    transformer = style_transfer_tester.StyleTransferTester(session=sess,
                                                            model_path=args.style_model,
                                                            content_image=content_image,
                                                            )
    # execute the graph
    start_time = time.time()
    output_image = transformer.test()
    end_time = time.time()

    # save result
    utils.save_image(output_image, args.output)

    # report execution time
    shape = content_image.shape  # (height, width, channel)
    print('Execution time for a %d x %d image : %f msec' % (shape[0], shape[1], 1000. * (end_time - start_time)))
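For benchmarking, time.perf_counter() is generally preferable to time.time(), since it is a monotonic, high-resolution clock unaffected by system clock adjustments. A sketch of the same measurement:

import time

start = time.perf_counter()
output_image = transformer.test()
elapsed_ms = (time.perf_counter() - start) * 1000.0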
Example #6
Source File: neural_style.py    From examples with BSD 3-Clause "New" or "Revised" License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0]) 
Example #7
Source File: sunrgbd_data.py    From reading-frustum-pointnets-code with Apache License 2.0
def get_image(self, idx):
        img_filename = os.path.join(self.image_dir, '%06d.jpg'%(idx))
        return utils.load_image(img_filename) 
Example #8
Source File: neural_style.py    From pytorch-multiple-style-transfer with BSD 3-Clause "New" or "Revised" License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)


    with torch.no_grad():
        style_model = TransformerNet(style_num=args.style_num)
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image, style_id=[args.style_id]).cpu()

    utils.save_image('output/' + args.output_image + '_style' + str(args.style_id) + '.jpg', output[0])
Example #9
Source File: h36m_input.py    From eccv18_mtvae with MIT License
def sample_image_seq(dataset_name, filename_pattern, max_length, keyframes):
  metadata = DATASET_TO_METADATA[dataset_name]
  im_height = metadata['im_height']
  im_width = metadata['im_width']
  image_seq = np.zeros((max_length, im_height, im_width, 3), dtype=np.float32)
  assert (keyframes.shape[0] == max_length)
  #print('loading images: %s' % filename_pattern)
  for i in range(max_length):
    #print('loading images [%02d]: %s' % (i, filename_pattern))
    image_seq[i] = utils.load_image(filename_pattern.replace('*', '%05d' % keyframes[i]))
  return image_seq 
Example #10
Source File: neural_style.py    From PyTorch with MIT License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0]) 
Example #11
Source File: neural_style.py    From ignite with BSD 3-Clause "New" or "Revised" License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))])

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = torch.load(args.model)
        style_model.to(device)
        output = style_model(content_image).cpu()
        utils.save_image(args.output_image, output[0]) 
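Unlike the state_dict-based examples above, this variant unpickles the entire model object, so torch.load only succeeds if the model class is importable under the same module path it had when the file was saved. Loading a state_dict into a freshly constructed network, as in Examples #6 and #10, is the more portable approach.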
Example #12
Source File: sunrgbd_data.py    From frustum-pointnets with Apache License 2.0
def get_image(self, idx):
        img_filename = os.path.join(self.image_dir, '%06d.jpg'%(idx))
        return utils.load_image(img_filename) 
Example #13
Source File: style_transfer_trainer.py    From tensorflow-fast-style-transfer with Apache License 2.0
def __init__(self, content_layer_ids, style_layer_ids, content_images, style_image, session, net, num_epochs,
                 batch_size, content_weight, style_weight, tv_weight, learn_rate, save_path, check_period, test_image,
                 max_size):

        self.net = net
        self.sess = session

        # sort layers info
        self.CONTENT_LAYERS = collections.OrderedDict(sorted(content_layer_ids.items()))
        self.STYLE_LAYERS = collections.OrderedDict(sorted(style_layer_ids.items()))

        # input images
        self.x_list = content_images
        mod = len(content_images) % batch_size
        if mod > 0:
            # drop the tail so the training set divides evenly into batches
            self.x_list = self.x_list[:-mod]
        self.y_s0 = style_image
        self.content_size = len(self.x_list)

        # parameters for optimization
        self.num_epochs = num_epochs
        self.content_weight = content_weight
        self.style_weight = style_weight
        self.tv_weight = tv_weight
        self.learn_rate = learn_rate
        self.batch_size = batch_size
        self.check_period = check_period

        # path for model to be saved
        self.save_path = save_path

        # image transform network
        self.transform = transform.Transform()
        self.tester = transform.Transform('test')

        # build graph for style transfer
        self._build_graph()

        # test during training
        if test_image is not None:
            self.TEST = True

            # load content image
            self.test_image = utils.load_image(test_image, max_size=max_size)

            # build graph
            self.x_test = tf.placeholder(tf.float32, shape=self.test_image.shape, name='test_input')
            self.xi_test = tf.expand_dims(self.x_test, 0)  # add one dim for batch

            # result image from transform-net
            self.y_hat_test = self.tester.net(
                self.xi_test / 255.0)  # build the training graph first; tester.net reuses its variables

        else:
            self.TEST = False 
Example #14
Source File: run_train.py    From tensorflow-fast-style-transfer with Apache License 2.0
def main():

    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # initiate VGG19 model
    model_file_path = args.vgg_model + '/' + vgg19.MODEL_FILE_NAME
    vgg_net = vgg19.VGG19(model_file_path)

    # get file list for training
    content_images = utils.get_files(args.trainDB_path)

    # load style image
    style_image = utils.load_image(args.style)

    # create a map for content layers info
    CONTENT_LAYERS = {}
    for layer, weight in zip(args.content_layers, args.content_layer_weights):
        CONTENT_LAYERS[layer] = weight

    # create a map for style layers info
    STYLE_LAYERS = {}
    for layer, weight in zip(args.style_layers, args.style_layer_weights):
        STYLE_LAYERS[layer] = weight

    # open session
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # build the graph for train
    trainer = style_transfer_trainer.StyleTransferTrainer(session=sess,
                                                          content_layer_ids=CONTENT_LAYERS,
                                                          style_layer_ids=STYLE_LAYERS,
                                                          content_images=content_images,
                                                          style_image=add_one_dim(style_image),
                                                          net=vgg_net,
                                                          num_epochs=args.num_epochs,
                                                          batch_size=args.batch_size,
                                                          content_weight=args.content_weight,
                                                          style_weight=args.style_weight,
                                                          tv_weight=args.tv_weight,
                                                          learn_rate=args.learn_rate,
                                                          save_path=args.output,
                                                          check_period=args.checkpoint_every,
                                                          test_image=args.test,
                                                          max_size=args.max_size,
                                                          )
    # launch the graph in a session
    trainer.train()

    # close session
    sess.close()