Python model.loss() Examples

The following are 21 code examples of model.loss(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module model, or try the search function.
Example #1
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 (6 votes)
def iterate(self, data):
        for key in data:
            t = data[key]
            if torch.is_tensor(t):
                data[key] = utils.ensure_device(t)
        tensor = torch.autograd.Variable(data['tensor'])
        pred = pybenchmark.profile('inference')(model._inference)(self.inference, tensor)
        height, width = data['image'].size()[1:3]
        rows, cols = pred['feature'].size()[-2:]
        loss, debug = pybenchmark.profile('loss')(model.loss)(self.anchors, norm_data(data, height, width, rows, cols), pred, self.config.getfloat('model', 'threshold'))
        loss_hparam = {key: loss[key] * self.config.getfloat('hparam', key) for key in loss}
        loss_total = sum(loss_hparam.values())
        self.optimizer.zero_grad()
        loss_total.backward()
        try:
            clip = self.config.getfloat('train', 'clip')
            nn.utils.clip_grad_norm(self.inference.parameters(), clip)
        except configparser.NoOptionError:
            pass
        self.optimizer.step()
        return dict(
            height=height, width=width, rows=rows, cols=cols,
            data=data, pred=pred, debug=debug,
            loss_total=loss_total, loss=loss, loss_hparam=loss_hparam,
        ) 
Example #2
Source File: operations.py    From Saliency_Detection_Convolutional_Autoencoder with MIT License (6 votes)
def create_model(name, batch_size, learning_rate = 0.0001, wd = 0.00001, concat = False, l2_loss = False, penalty = False, coef = 0.4, verbosity = 0):
  """
  Create a model from model.py with the given configuration
  
  Args:
    name             : name of the model (used to create a specific folder to save/load parameters)
    batch_size       : batch size
    learning_rate    : learning rate (cross entropy is around 100x larger than l2)
    wd               : weight decay factor
    concat           : does this model include direct connections?
    l2_loss          : does this model use l2 loss (if not then cross entropy)
    penalty          : whether to use the edge contrast penalty
    coef             : coefficient for the edge contrast penalty
    verbosity        : level of details to display
    
  Returns:
    my_model         : created model
  """
  
  my_model = model.MODEL(name, batch_size, learning_rate, wd, concat, l2_loss, penalty, coef)
  my_model.display_info(verbosity)
  return my_model 
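
A minimal usage sketch for the helper above; the model name and hyperparameter values are illustrative only, and model.MODEL comes from the project's model.py:

# Illustrative call; the name and values below are made up for this sketch.
my_model = create_model('saliency_test', batch_size=4,
                        learning_rate=0.0001, wd=0.00001,
                        concat=True, l2_loss=False,
                        penalty=True, coef=0.4, verbosity=1)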
Example #3
Source File: train.py    From tensorflow-pos-tagger with GNU General Public License v3.0 (6 votes)
def logging_init(model, graph):
    """
    Set up logging so that progress can be visualised in TensorBoard.
    """
    # Add ops to record summaries for loss and accuracy...
    train_loss = tf.summary.scalar("train_loss", model.loss)
    train_accuracy = tf.summary.scalar("train_accuracy", model.accuracy)
    # ...then merge these ops into a single op so that they can easily be run
    # together
    train_summary_ops = tf.summary.merge([train_loss, train_accuracy])
    # Same ops, but with different names, so that train/test results show up
    # separately in TensorBoard
    test_loss = tf.summary.scalar("test_loss", model.loss)
    test_accuracy = tf.summary.scalar("test_accuracy", model.accuracy)
    test_summary_ops = tf.summary.merge([test_loss, test_accuracy])

    timestamp = int(time.time())
    run_log_dir = os.path.join(LOGS_DIR, str(timestamp))
    os.makedirs(run_log_dir)
    # (this step also writes the graph to the events file so that
    # it shows up in TensorBoard)
    summary_writer = tf.summary.FileWriter(run_log_dir, graph)

    return train_summary_ops, test_summary_ops, summary_writer 
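
The returned ops are meant to be evaluated alongside a training or test step and written out with the returned writer. A minimal sketch of that pattern, assuming the model exposes input_x/input_y placeholders and a global_step variable as in the other tensorflow-pos-tagger examples below:

# Evaluate the merged train summaries for one batch and log them.
summaries, current_step = sess.run(
    [train_summary_ops, global_step],
    feed_dict={model.input_x: x_batch, model.input_y: y_batch})
summary_writer.add_summary(summaries, current_step)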
Example #4
Source File: multigpu_train.py    From EAST_ICPR with GNU General Public License v3.0 (6 votes)
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss 
Example #5
Source File: multigpu_train.py    From uai-sdk with Apache License 2.0 (6 votes)
def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=True)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)

    return total_loss, model_loss 
Example #6
Source File: train.py    From vae-audio with MIT License (5 votes)
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler; to disable the scheduler, delete every line containing lr_scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)

    trainer = getattr(module_trainer, config['trainer']['type'])(model, loss, metrics, optimizer,
                                                                    config=config,
                                                                    data_loader=data_loader,
                                                                    valid_data_loader=valid_data_loader,
                                                                    lr_scheduler=lr_scheduler)
    trainer.train() 
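
Here config['loss'] and config['metrics'] are plain function names resolved with getattr against the project's loss and metric modules. A minimal sketch of what such functions can look like (illustrative signatures, not necessarily the project's exact implementations):

import torch
import torch.nn.functional as F

def nll_loss(output, target):
    # Negative log-likelihood on log-probabilities produced by the model.
    return F.nll_loss(output, target)

def accuracy(output, target):
    # Fraction of correctly classified samples in the batch.
    with torch.no_grad():
        pred = torch.argmax(output, dim=1)
        return torch.sum(pred == target).item() / len(target)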
Example #7
Source File: trainer.py    From textboxes with MIT License (5 votes)
def __init__(self, model_dir=None):
		self.sess = tf.Session()
		
		self.imgs_ph, self.bn, self.output_tensors, self.pred_labels, self.pred_locs = model.model(self.sess)

		total_boxes = self.pred_labels.get_shape().as_list()[1]
		self.positives_ph, self.negatives_ph, self.true_labels_ph, self.true_locs_ph, self.total_loss, self.class_loss, self.loc_loss = \
			model.loss(self.pred_labels, self.pred_locs, total_boxes)

		out_shapes = [out.get_shape().as_list() for out in self.output_tensors]

		c.out_shapes = out_shapes
		
		c.defaults = model.default_boxes(out_shapes)
		# variables in model are already initialized, so only initialize those declared after
		with tf.variable_scope("optimizer"):
			self.global_step = tf.Variable(0)
			self.lr_ph = tf.placeholder(tf.float32)
			self.optimizer = tf.train.AdamOptimizer(1e-3).minimize(self.total_loss, global_step=self.global_step)
		new_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="optimizer")
		init = tf.variables_initializer(new_vars)
		self.sess.run(init)

		if model_dir is None:
			model_dir = FLAGS.model_dir

		ckpt = tf.train.get_checkpoint_state(model_dir)
		self.saver = tf.train.Saver()

		if ckpt and ckpt.model_checkpoint_path:
			self.saver.restore(self.sess, ckpt.model_checkpoint_path)
			print("restored %s" % ckpt.model_checkpoint_path) 
Example #8
Source File: trainer.py    From DDRNet with MIT License (5 votes)
def fit(self):
        is_training = True
        config, params = self.config, self.params

        # start training from previous global_step
        start_step = self.sess.run(params["global_step"])
        if not start_step == 0:
            print("Start training from previous {} steps".format(start_step))

        for step in range(start_step, config.max_steps):
            t1 = time.time()

            # # dbg filter condition.
            # diff = self.sess.run(self.diff)
            # if diff < config.diff_thres: logging.debug(diff)
            # else: logging.debug('diff too large. discard.'+str(diff))

            loss, loss1, loss2, _ = self.sess.run([self.total_loss, self.loss1, self.loss2, self.train_op],
                          feed_dict={params["is_training"]: is_training})
            print('step {}: loss: {:.2f}\t loss1: {:.2f}\t loss2: {:.2f}'.format(step, loss, loss1, loss2))
            t2 = time.time()

            if step % config.summary_every_n_steps == 0:
                summary_feed_dict = {params["is_training"]: is_training}
                self.make_summary(summary_feed_dict, step)

                eta = (t2 - t1) * (config.max_steps - step + 1)
                print("Finished {}/{} steps, ETA:{:.2f} seconds".format(step, config.max_steps, eta))
                utils.flush_stdout()

            if step % config.save_model_steps == 0:
                self.saver.save(self.sess, os.path.join(config.logdir,
                    "{}-{}".format(config.checkpoint_basename.split('/')[-1], step)))

        self.saver.save(self.sess, os.path.join(config.logdir,
            "{}-{}".format(config.checkpoint_basename.split('/')[-1], config.max_steps))) 
Example #9
Source File: train.py    From tensorflow-pos-tagger with GNU General Public License v3.0 (5 votes)
def step(sess, model, standard_ops, train_ops, test_ops, x, y, summary_writer,
         train):
    feed_dict = {model.input_x: x, model.input_y: y}

    if train:
        step, loss, accuracy, _, summaries = sess.run(standard_ops + train_ops,
                                                      feed_dict)
    else:
        step, loss, accuracy, summaries = sess.run(standard_ops + test_ops,
                                                   feed_dict)

    print("Step %d: loss %.1f, accuracy %d%%" % (step, loss, 100 * accuracy))
    summary_writer.add_summary(summaries, step) 
Example #10
Source File: train.py    From tensorflow-pos-tagger with GNU General Public License v3.0 (5 votes)
def model_init(vocab_size, embedding_size, n_past_words, n_pos_tags):
    pos_tagger = model.Tagger(vocab_size, embedding_size, n_past_words,
                              n_pos_tags)

    global_step = tf.Variable(
        initial_value=0, name="global_step", trainable=False)
    optimizer = tf.train.AdamOptimizer()
    train_op = optimizer.minimize(pos_tagger.loss, global_step=global_step)

    return pos_tagger, train_op, global_step 
Example #11
Source File: mnist_multi_gpu_train.py    From mnist-multi-gpu with Apache License 2.0 (5 votes)
def tower_loss(scope):
    """Calculate the total loss on a single tower running the MNIST model.
  
    Args:
      scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'
  
    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """
    # Get images and labels for MNIST.
    images, labels = model.inputs(FLAGS.batch_size)

    # Build inference Graph.
    logits = model.inference(images, keep_prob=0.5)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = model.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    if (FLAGS.tb_logging):
        for l in losses + [total_loss]:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
            # training session. This helps the clarity of presentation on
            # tensorboard.
            loss_name = re.sub('%s_[0-9]*/' % model.TOWER_NAME, '', l.op.name)
            tf.summary.scalar(loss_name, l)

    return total_loss 
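
tower_loss() is intended to be called once per GPU inside a device/name-scope loop, with variables reused between towers. A minimal sketch of that driver loop in the style of the classic multi-GPU CIFAR-10 example; FLAGS.num_gpus, FLAGS.learning_rate and the optimizer choice here are assumptions for illustration:

tower_grads = []
opt = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
with tf.variable_scope(tf.get_variable_scope()):
    for i in range(FLAGS.num_gpus):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('%s_%d' % (model.TOWER_NAME, i)) as scope:
                # Build the model and loss for this tower.
                loss = tower_loss(scope)
                # Share variables with the towers built on the other GPUs.
                tf.get_variable_scope().reuse_variables()
                tower_grads.append(opt.compute_gradients(loss))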
Example #12
Source File: distgpu_train.py    From uai-sdk with Apache License 2.0 (5 votes)
def _tower_fn(is_training, images, score_maps, geo_maps, training_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        f_score, f_geometry = model.model(images, is_training=is_training)

    model_loss = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    summaries = None
    if reuse_variables is None:
        image_sum = tf.summary.image('input', images)
        score_sum = tf.summary.image('score_map', score_maps)
        f_score_sum = tf.summary.image('score_map_pred', f_score * 255)
        geo_sum = tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        f_geo_sum = tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        mask_sum = tf.summary.image('training_masks', training_masks)
        loss1_sum = tf.summary.scalar('model_loss', model_loss)
        loss_sum = tf.summary.scalar('total_loss', total_loss)
        summaries = [image_sum, score_sum, f_score_sum, geo_sum, f_geo_sum, mask_sum, loss1_sum, loss_sum]

    model_params = tf.trainable_variables()
    tower_grad = tf.gradients(total_loss, model_params)

    return total_loss, zip(tower_grad, model_params), summaries 
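
The per-tower (gradient, variable) pairs returned above are typically averaged across towers before a single optimizer applies them. A minimal averaging sketch under that assumption (this helper is not part of the file above):

def average_gradients(tower_grads):
    # tower_grads: one list of (gradient, variable) pairs per tower.
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # All towers share the same variable, so take it from the first tower.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads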
Example #13
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 (5 votes)
def copy_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        loss_total = loss_total.data.clone().cpu().numpy()
        loss = {key: l.data.clone().cpu().numpy() for key, l in loss.items()}
        loss_hparam = {key: l.data.clone().cpu().numpy() for key, l in loss_hparam.items()}
        return dict(
            step=step,
            loss_total=loss_total,
            loss=loss, loss_hparam=loss_hparam,
        ) 
Example #14
Source File: multigpu_train.py    From ICPR_TextDection with GNU General Public License v3.0 (5 votes)
def tower_loss(images, score_maps, geo_maps, training_masks, weights_masks, reuse_variables=None):
    # Build inference graph
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        # f_score, f_geometry = model.model(images, is_training=True)
        f_score, f_geometry = model.model_InceptionResNet(images, is_training=True)
        # f_score, f_geometry = model.model_InceptionResNet_BLSTM(images, is_training=True)
        # f_score, f_geometry = model.model_InceptionResNet_symmetry(images, is_training=True)
    model_loss, L_g, L_s, L_s_c = model.loss(score_maps, f_score,
                            geo_maps, f_geometry,
                            training_masks, weights_masks)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

    # add summary
    if reuse_variables is None:
        tf.summary.image('input', images)
        tf.summary.image('score_map', score_maps)
        tf.summary.image('score_map_pred', f_score * 255)
        tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])
        tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])
        tf.summary.image('training_masks', training_masks)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('geometry_loss', L_g)
        tf.summary.scalar('score_loss', L_s)

    return total_loss, model_loss, L_g, L_s, L_s_c 
Example #15
Source File: train.py    From pytorch-template with MIT License (5 votes)
def main(config):
    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.init_obj('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.init_obj('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    criterion = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler; to disable the scheduler, delete every line containing lr_scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)

    trainer = Trainer(model, criterion, metrics, optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler)

    trainer.train() 
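
config.init_obj('arch', ...), config.init_obj('optimizer', ...) and the rest read a JSON config with one type/args entry per component. A minimal sketch of the kind of config this main() expects, written here as a Python dict; the field names follow the pytorch-template convention, while the concrete values are illustrative:

config_sketch = {
    'arch': {'type': 'MnistModel', 'args': {}},
    'data_loader': {'type': 'MnistDataLoader',
                    'args': {'data_dir': 'data/', 'batch_size': 128,
                             'shuffle': True, 'validation_split': 0.1,
                             'num_workers': 2}},
    'optimizer': {'type': 'Adam', 'args': {'lr': 0.001, 'weight_decay': 0}},
    'loss': 'nll_loss',
    'metrics': ['accuracy'],
    'lr_scheduler': {'type': 'StepLR', 'args': {'step_size': 50, 'gamma': 0.1}},
    'trainer': {'epochs': 100, 'save_dir': 'saved/', 'verbosity': 2},
}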
Example #16
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 (5 votes)
def check_nan(self, **kwargs):
        step, loss_total, loss, data = (kwargs[key] for key in 'step, loss_total, loss, data'.split(', '))
        if np.isnan(loss_total.data.cpu()[0]):
            dump_dir = os.path.join(self.model_dir, str(step))
            os.makedirs(dump_dir, exist_ok=True)
            torch.save(collections.OrderedDict([(key, var.cpu()) for key, var in self.dnn.state_dict().items()]), os.path.join(dump_dir, 'model.pth'))
            torch.save(data, os.path.join(dump_dir, 'data.pth'))
            for key, l in loss.items():
                logging.warning('%s=%f' % (key, l.data.cpu()[0]))
            raise OverflowError('NaN loss detected, dump runtime information into ' + dump_dir) 
Example #17
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0 (5 votes)
def summary_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        for key, l in loss.items():
            self.writer.add_scalar('loss/' + key, l[0], step)
        if self.config.getboolean('summary_scalar', 'loss_hparam'):
            self.writer.add_scalars('loss_hparam', {key: l[0] for key, l in loss_hparam.items()}, step)
        self.writer.add_scalar('loss_total', loss_total[0], step) 
Example #18
Source File: test.py    From vae-audio with MIT License (4 votes)
def main(config, resume):
    logger = config.get_logger('test')

    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=512,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2
    )

    # build model architecture
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    logger.info('Loading checkpoint: {} ...'.format(resume))
    checkpoint = torch.load(resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            output = model(data)

            #
            # save sample images, or do something with output here
            #

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            for i, metric in enumerate(metric_fns):
                total_metrics[i] += metric(output, target) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
    })
    logger.info(log) 
Example #19
Source File: train.py    From acdc_segmenter with Apache License 2.0 (4 votes)
def do_eval(sess,
            eval_loss,
            images_placeholder,
            labels_placeholder,
            training_time_placeholder,
            images,
            labels,
            batch_size):

    '''
    Function for running the evaluations every X iterations on the training and validation sets. 
    :param sess: The current tf session 
    :param eval_loss: The placeholder containing the eval loss
    :param images_placeholder: Placeholder for the images
    :param labels_placeholder: Placeholder for the masks
    :param training_time_placeholder: Placeholder toggling the training/testing mode. 
    :param images: A numpy array or h5py dataset containing the images
    :param labels: A numpy array or h5py dataset containing the corresponding labels
    :param batch_size: The batch_size to use. 
    :return: The average loss (as defined in the experiment), and the average dice over all `images`. 
    '''

    loss_ii = 0
    dice_ii = 0
    num_batches = 0

    for batch in BackgroundGenerator(iterate_minibatches(images, labels, batch_size=batch_size, augment_batch=False)):  # No aug in evaluation
    # As before you can wrap the iterate_minibatches function in the BackgroundGenerator class for speed improvements
    # but at the risk of not catching exceptions

        x, y = batch

        if y.shape[0] < batch_size:
            continue

        feed_dict = { images_placeholder: x,
                      labels_placeholder: y,
                      training_time_placeholder: False}

        closs, cdice = sess.run(eval_loss, feed_dict=feed_dict)
        loss_ii += closs
        dice_ii += cdice
        num_batches += 1

    avg_loss = loss_ii / num_batches
    avg_dice = dice_ii / num_batches

    logging.info('  Average loss: %0.04f, average dice: %0.04f' % (avg_loss, avg_dice))

    return avg_loss, avg_dice 
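
A minimal sketch of calling do_eval() on a held-out set; the placeholder handles and validation arrays below are assumed names standing in for whatever the surrounding training script defines:

# Run the periodic evaluation on the validation data.
val_loss, val_dice = do_eval(sess,
                             eval_loss,
                             images_placeholder,
                             labels_placeholder,
                             training_time_placeholder,
                             images_val,
                             labels_val,
                             batch_size)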
Example #20
Source File: train.py    From tensorflow-pos-tagger with GNU General Public License v3.0 (4 votes)
def main():
    args = parse_args()

    sess = tf.Session()

    train_batches, test_data, n_pos_tags = load_data(
        args.data_path, args.vocab_size, args.n_past_words,
        args.test_proportion, args.batch_size, args.n_epochs)
    x_test = test_data['x']
    y_test = test_data['y']
    pos_tagger, train_op, global_step = model_init(
        args.vocab_size, args.embedding_dim, args.n_past_words, n_pos_tags)
    train_summary_ops, test_summary_ops, summary_writer = logging_init(
        pos_tagger, sess.graph)
    saver = checkpointing_init()

    sess.run(tf.global_variables_initializer())
    sess.graph.finalize()

    standard_ops = [global_step, pos_tagger.loss, pos_tagger.accuracy]
    train_ops = [train_op, train_summary_ops]
    test_ops = [test_summary_ops]

    for batch in train_batches:
        x_batch, y_batch = zip(*batch)
        step(
            sess,
            pos_tagger,
            standard_ops,
            train_ops,
            test_ops,
            x_batch,
            y_batch,
            summary_writer,
            train=True)
        current_step = tf.train.global_step(sess, global_step)

        if current_step % EVALUATE_EVERY == 0:
            print("\nEvaluation:")
            step(
                sess,
                pos_tagger,
                standard_ops,
                train_ops,
                test_ops,
                x_test,
                y_test,
                summary_writer,
                train=False)
            print("")

        if current_step % CHECKPOINT_EVERY == 0:
            prefix = os.path.join(CHECKPOINT_DIR, 'model')
            path = saver.save(sess, prefix, global_step=current_step)
            print("Saved model checkpoint to '%s'" % path) 
Example #21
Source File: test.py    From pytorch-template with MIT License (4 votes)
def main(config):
    logger = config.get_logger('test')

    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=512,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2
    )

    # build model architecture
    model = config.init_obj('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    logger.info('Loading checkpoint: {} ...'.format(config.resume))
    checkpoint = torch.load(config.resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            output = model(data)

            #
            # save sample images, or do something with output here
            #

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            for i, metric in enumerate(metric_fns):
                total_metrics[i] += metric(output, target) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
    })
    logger.info(log)