Python tensorboard.SummaryWriter() Examples

The following are 11 code examples of tensorboard.SummaryWriter(). Each example is excerpted from the open-source project and source file named above it. You may also want to check out all available functions and classes of the tensorboard module.
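Before the project-specific examples, here is a minimal sketch of the pattern they share, distilled from the snippets below. It assumes the standalone tensorboard package these projects use, which exposes SummaryWriter at the top level; the log directory name and tag are illustrative.

from tensorboard import SummaryWriter

# './logs' is an illustrative event directory
writer = SummaryWriter('./logs')
for step in range(10):
    # tag, value, global step -- mirrors the add_scalar calls in the
    # capsulenet examples below
    writer.add_scalar('loss', 1.0 / (step + 1), step)
writer.close()  # flush pending events and close the event file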
Example #1
Source File: train.py    From pytorch-bilstmcrf with MIT License
def __init__(self, sargs, input_vocabs, label_vocab, *args,
             val_data=None, **kwargs):
    super(LSTMCRFTrainer, self).__init__(*args, **kwargs)

    self.args = sargs
    self.input_vocabs = input_vocabs
    self.label_vocab = label_vocab
    self.val_data = val_data
    self.writer = None

    # `T` is the tensorboard module imported at the top of train.py
    # (presumably `import tensorboard as T`); events go to the save
    # directory only when the --tensorboard flag is set
    if self.args.tensorboard:
        self.writer = T.SummaryWriter(self.args.save_dir)

    # callbacks to run periodically, keyed by their repeat period
    self.repeatables = {
        self.args.ckpt_period: self.save_checkpoint
    }

    if self.args.val:
        self.repeatables[self.args.val_period] = self.validate
Example #2
Source File: custom_callbacks.py    From mxnet-ssd with MIT License
def __init__(self, dist_logging_dir=None, scalar_logging_dir=None,
             logfile_path=None, batch_size=None, iter_monitor=0,
             frequent=None, prefix='ssd'):
    self.scalar_logging_dir = scalar_logging_dir
    self.dist_logging_dir = dist_logging_dir
    self.logfile_path = logfile_path
    self.batch_size = batch_size
    self.iter_monitor = iter_monitor
    self.frequent = frequent
    self.prefix = prefix
    self.batch = 0
    self.line_idx = 0
    try:
        # lazy import so the callback still constructs when tensorboard
        # is absent; separate writers keep distribution and scalar
        # events in their own log directories
        from tensorboard import SummaryWriter
        self.dist_summary_writer = SummaryWriter(dist_logging_dir)
        self.scalar_summary_writer = SummaryWriter(scalar_logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #3
Source File: custom_callbacks.py    From mxnet-ssd with MIT License
def __init__(self, logging_dir=None, prefix='val', images_path=None,
             class_names=None, batch_size=None, mean_pixels=None, det_thresh=0.5):
    self.logging_dir = logging_dir
    self.prefix = prefix
    # create the directory for rendered detection images if needed
    if not os.path.exists(images_path):
        os.mkdir(images_path)
    self.images_path = images_path
    self.class_names = class_names
    self.batch_size = batch_size
    self.mean_pixels = mean_pixels
    self.det_thresh = det_thresh
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #4
Source File: tensor_board.py    From VideoSearchEngine with MIT License
def create_writer():
    # SummaryWriter and LOG_DIR are module-level names in tensor_board.py
    # (e.g. `from tensorboard import SummaryWriter`)
    return SummaryWriter(LOG_DIR)
Example #5
Source File: tensorboard.py    From mxnet-lambda with Apache License 2.0
def __init__(self, logging_dir, prefix=None):
    self.prefix = prefix
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #6
Source File: test_summary_writer.py    From tensorboard with Apache License 2.0
def test_log_scalar_summary():
    logdir = './experiment/scalar'
    writer = SummaryWriter(logdir)
    # write ten scalar events under the tag 'test_scalar'
    for i in range(10):
        writer.add_scalar('test_scalar', i + 1)
    writer.close()
Example #7
Source File: custom_callbacks.py    From mxnet-ssd with MIT License
def __init__(self, logging_dir=None, prefix='val', roc_path=None, class_names=None):
    self.prefix = prefix
    self.roc_path = roc_path
    self.class_names = class_names
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #8
Source File: custom_callbacks.py    From mxnet-ssd with MIT License
def __init__(self, logging_dir, prefix=None, layers_list=None):
    self.prefix = prefix
    self.layers_list = layers_list
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #9
Source File: tensorboard.py    From SNIPER-mxnet with Apache License 2.0
def __init__(self, logging_dir, prefix=None):
    self.prefix = prefix
    try:
        from tensorboard import SummaryWriter
        self.summary_writer = SummaryWriter(logging_dir)
    except ImportError:
        logging.error('You can install tensorboard via `pip install tensorboard`.')
Example #10
Source File: capsulenet.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    # `args`, `module`, `train_iter`, `val_iter` and `loss_metric` are
    # module-level objects defined elsewhere in capsulenet.py
    summary_writer = SummaryWriter(args.tblog_dir)
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore,
                          optimizer=optimizer,
                          optimizer_params=optimizer_params)
    n_epoch = 0
    while True:
        if n_epoch >= num_epoch:
            break
        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()

        # log per-epoch training and validation metrics, keyed by epoch
        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)

        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss, train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
        print('SAVE CHECKPOINT')

        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        # decay the learning rate exponentially: lr = base_lr * decay**epoch
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
Example #11
Source File: capsulenet.py    From SNIPER-mxnet with Apache License 2.0
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    summary_writer = SummaryWriter(args.tblog_dir)
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore,
                          optimizer=optimizer,
                          optimizer_params=optimizer_params)
    n_epoch = 0
    while True:
        if n_epoch >= num_epoch:
            break
        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
        loss_metric.reset()
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()

        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)

        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss, train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
        print('SAVE CHECKPOINT')

        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)