Python torch.utils.tensorboard.SummaryWriter() Examples

The following are 30 code examples of torch.utils.tensorboard.SummaryWriter(). Each example notes the project and source file it comes from, so you can trace it back to the original code. You may also want to check out the other available functions and classes of the torch.utils.tensorboard module.
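Before the project examples, here is a minimal, self-contained sketch of the typical SummaryWriter workflow; the log directory, tag, and values are placeholders:

import math

from torch.utils.tensorboard import SummaryWriter

# Event files land under ./runs/demo (placeholder path).
writer = SummaryWriter(log_dir="runs/demo")
for step in range(100):
    # One point per step on the "loss" chart in TensorBoard.
    writer.add_scalar("loss", math.exp(-step / 20), global_step=step)
writer.close()  # flush pending events and release the file handle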
Example #1
Source File: logger.py    From pytorch_sac_ae with MIT License
def __init__(self, log_dir, use_tb=True, config='rl'):
        self._log_dir = log_dir
        if use_tb:
            tb_dir = os.path.join(log_dir, 'tb')
            if os.path.exists(tb_dir):
                shutil.rmtree(tb_dir)
            self._sw = SummaryWriter(tb_dir)
        else:
            self._sw = None
        self._train_mg = MetersGroup(
            os.path.join(log_dir, 'train.log'),
            formating=FORMAT_CONFIG[config]['train']
        )
        self._eval_mg = MetersGroup(
            os.path.join(log_dir, 'eval.log'),
            formating=FORMAT_CONFIG[config]['eval']
        ) 
Example #2
Source File: trainer.py    From nussl with MIT License
def add_tensorboard_handler(tensorboard_folder, engine, every_iteration=False):
    """
    The latest value of every key in engine.state.epoch_history is logged to TensorBoard.
    
    Args:
        tensorboard_folder (str): Where the tensorboard logs should go.
        engine (ignite.Engine): The engine to log.
        every_iteration (bool, optional): Whether to also log the values at every 
          iteration.
    """

    @engine.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_to_tensorboard(engine):
        writer = SummaryWriter(tensorboard_folder)
        for key in engine.state.epoch_history:
            writer.add_scalar(
                key, engine.state.epoch_history[key][-1], engine.state.epoch)

    if every_iteration:
        @engine.on(Events.ITERATION_COMPLETED)
        def log_iteration_to_tensorboard(engine):
            writer = SummaryWriter(tensorboard_folder)
            for key in engine.state.iter_history:
                writer.add_scalar(
                    key, engine.state.iter_history[key][-1], engine.state.iteration) 
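Note that this handler constructs a fresh SummaryWriter on every callback invocation, and each construction adds another event file to the same folder. A variant that shares one writer via a closure (a sketch reusing the names from the snippet above, not the nussl API):

def add_tensorboard_handler(tensorboard_folder, engine):
    # One writer for the engine's lifetime instead of one per event.
    writer = SummaryWriter(tensorboard_folder)

    @engine.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_to_tensorboard(engine):
        for key in engine.state.epoch_history:
            writer.add_scalar(
                key, engine.state.epoch_history[key][-1], engine.state.epoch)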
Example #3
Source File: test_reporter.py    From espnet with Apache License 2.0
def test_tensorboard_add_scalar(tmp_path: Path):
    reporter = Reporter()
    reporter.set_epoch(1)
    key1 = uuid.uuid4().hex
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)

    reporter.set_epoch(1)
    with reporter.observe(key1) as sub:
        # Skip epoch=2
        sub.register({})

    reporter.set_epoch(3)
    with reporter.observe(key1) as sub:
        stats1 = {"aa": 0.6}
        sub.register(stats1)

    if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
        from torch.utils.tensorboard import SummaryWriter
    else:
        from tensorboardX import SummaryWriter
    writer = SummaryWriter(tmp_path)
    reporter.tensorboard_add_scalar(writer) 
Example #4
Source File: reporting.py    From mead-baseline with Apache License 2.0
def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.use_tf = True
        # Base dir is often the dir created to save the model into
        base_dir = kwargs.get('base_dir', '.')
        log_dir = os.path.expanduser(kwargs.get('log_dir', 'runs'))
        if not os.path.isabs(log_dir):
            log_dir = os.path.join(base_dir, log_dir)
        # Run dir is the name of an individual run
        run_dir = kwargs.get('run_dir')
        pid = str(os.getpid())
        run_dir = '{}-{}'.format(run_dir, pid) if run_dir is not None else pid
        log_dir = os.path.join(log_dir, run_dir)

        try:
            from torch.utils.tensorboard import SummaryWriter
            self._log = SummaryWriter(log_dir)
            self.use_tf = False
        except ImportError:
            import tensorflow as tf
            file_writer = tf.summary.create_file_writer(log_dir)
            file_writer.set_as_default()
            self._log_scalar = tf.summary.scalar 
Example #5
Source File: tensorboard_backend.py    From delira with GNU Affero General Public License v3.0
def __init__(self, writer_kwargs=None,
                 abort_event: Event = None, queue: Queue = None):
        """

        Parameters
        ----------
        writer_kwargs : dict
            arguments to initialize a writer
        abort_event : :class:`threading.Event`
            the event used to signal an abort
        queue : :class:`queue.Queue`
            the queue holding all logging tasks
        """

        if writer_kwargs is None:
            writer_kwargs = {}

        if "logdir" in writer_kwargs:
            writer_kwargs[LOGDIR_KWARG] = writer_kwargs.pop("logdir")
        elif "log_dir" in writer_kwargs:
            writer_kwargs[LOGDIR_KWARG] = writer_kwargs.pop("log_dir")

        super().__init__(SummaryWriter, writer_kwargs,
                         abort_event, queue) 
Example #6
Source File: tensorboard.py    From pytorch-lightning with Apache License 2.0
def experiment(self) -> SummaryWriter:
        r"""
        Actual tensorboard object. To use TensorBoard features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_tensorboard_function()

        """
        if self._experiment is not None:
            return self._experiment

        assert rank_zero_only.rank == 0, 'tried to init log dirs in non global_rank=0'
        os.makedirs(self.root_dir, exist_ok=True)
        self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
        return self._experiment 
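Because the property returns the raw writer, every SummaryWriter method is reachable from a LightningModule. A hedged sketch of that usage (the module body, loss_fn, and first_layer are illustrative, not part of the pytorch-lightning API):

import pytorch_lightning as pl

class LitModel(pl.LightningModule):
    def training_step(self, batch, batch_idx):
        x, y = batch                      # assumes (input, target) batches
        loss = self.loss_fn(self(x), y)   # hypothetical loss_fn attribute
        # self.logger.experiment is the SummaryWriter from the property above.
        self.logger.experiment.add_histogram(
            "weights/first_layer", self.first_layer.weight, self.global_step)
        return loss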
Example #7
Source File: tensorboard.py    From mmcv with Apache License 2.0
def before_run(self, runner):
        if TORCH_VERSION < '1.1' or TORCH_VERSION == 'parrots':
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise ImportError('Please install tensorboardX to use '
                                  'TensorboardLoggerHook.')
        else:
            try:
                from torch.utils.tensorboard import SummaryWriter
            except ImportError:
                raise ImportError(
                    'Please run "pip install future tensorboard" to install '
                    'the dependencies to use torch.utils.tensorboard '
                    '(applicable to PyTorch 1.1 or higher)')

        if self.log_dir is None:
            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
        self.writer = SummaryWriter(self.log_dir) 
Example #8
Source File: super.py    From jdit with Apache License 2.0
def graph(self, model: Union[torch.nn.Module, torch.nn.DataParallel, Model], name: str, use_gpu: bool,
              *input_shape):
        if isinstance(model, torch.nn.Module):
            proto_model: torch.nn.Module = model
            num_params: int = self._count_params(proto_model)
        elif isinstance(model, torch.nn.DataParallel):
            proto_model: torch.nn.Module = model.module
            num_params: int = self._count_params(proto_model)
        elif isinstance(model, Model):
            proto_model: torch.nn.Module = model.model
            num_params: int = model.num_params
        else:
            raise TypeError("Only `nn.Module`, `nn.DataParallel` and `Model` can be passed!")
        model_logdir = os.path.join(self.logdir, name)
        self._build_dir(model_logdir)
        writer_for_model = SummaryWriter(log_dir=model_logdir)

        input_list = tuple(torch.ones(shape).cuda() if use_gpu else torch.ones(shape) for shape in input_shape)
        self.scalars({'ParamsNum': num_params}, 0, tag="ParamsNum")
        self.scalars({'ParamsNum': num_params}, 1, tag="ParamsNum")
        proto_model(*input_list)
        writer_for_model.add_graph(proto_model, input_list)
        writer_for_model.close() 
Example #9
Source File: tensorboard_logger.py    From Human-Pose-Transfer with MIT License
def __init__(self, log_dir):
        try:
            from torch.utils.tensorboard import SummaryWriter
        except ImportError:
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise RuntimeError("This contrib module requires tensorboardX to be installed. "
                                   "Please install it with command: \n pip install tensorboardX")

        try:
            self.writer = SummaryWriter(log_dir)
        except TypeError as err:
            if "type object got multiple values for keyword argument 'logdir'" == str(err):
                self.writer = SummaryWriter(log_dir=log_dir)
                warnings.warn('tensorboardX version < 1.7 will not be supported '
                              'after ignite 0.3.0; please upgrade',
                              DeprecationWarning)
            else:
                raise err 
Example #10
Source File: test_tensorboardX.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def test_writing_stack(self):
        with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
            writer1 = SummaryWriter(tmp_dir1)
            writer1.add_scalar = MagicMock()
            writer2 = SummaryWriter(tmp_dir2)
            writer2.add_scalar = MagicMock()
            with summary_writer_context(writer1):
                with summary_writer_context(writer2):
                    SummaryWriterContext.add_scalar("test2", torch.ones(1))
                SummaryWriterContext.add_scalar("test1", torch.zeros(1))
            writer1.add_scalar.assert_called_once_with(
                "test1", torch.zeros(1), global_step=0
            )
            writer2.add_scalar.assert_called_once_with(
                "test2", torch.ones(1), global_step=0
            ) 
Example #11
Source File: events.py    From detectron2 with Apache License 2.0
def put_histogram(self, hist_name, hist_tensor, bins=1000):
        """
        Create a histogram from a tensor.

        Args:
            hist_name (str): The name of the histogram to put into tensorboard.
            hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted
                into a histogram.
            bins (int): Number of histogram bins.
        """
        ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item()

        # Create a histogram with PyTorch
        hist_counts = torch.histc(hist_tensor, bins=bins)
        hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32)

        # Parameters for the add_histogram_raw function of SummaryWriter
        hist_params = dict(
            tag=hist_name,
            min=ht_min,
            max=ht_max,
            num=len(hist_tensor),
            sum=float(hist_tensor.sum()),
            sum_squares=float(torch.sum(hist_tensor ** 2)),
            bucket_limits=hist_edges[1:].tolist(),
            bucket_counts=hist_counts.tolist(),
            global_step=self._iter,
        )
        self._histograms.append(hist_params) 
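Each dict appended to self._histograms matches the keyword signature of SummaryWriter.add_histogram_raw, so a writer can flush them all later in one pass. A sketch of that flush step (flush_histograms is a hypothetical helper, not detectron2 API):

def flush_histograms(writer, histograms):
    # Every dict already carries tag, min, max, num, sum, sum_squares,
    # bucket_limits, bucket_counts and global_step.
    for hist_params in histograms:
        writer.add_histogram_raw(**hist_params)
    histograms.clear()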
Example #12
Source File: events.py    From detectron2 with Apache License 2.0
def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
        """
        Args:
            log_dir (str): the directory to save the output events
            window_size (int): the scalars will be median-smoothed by this window size

            kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`
        """
        self._window_size = window_size
        from torch.utils.tensorboard import SummaryWriter

        self._writer = SummaryWriter(log_dir, **kwargs) 
Example #13
Source File: test_tensorboardX.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def test_writing(self):
        with TemporaryDirectory() as tmp_dir:
            writer = SummaryWriter(tmp_dir)
            writer.add_scalar = MagicMock()
            with summary_writer_context(writer):
                SummaryWriterContext.add_scalar("test", torch.ones(1))
            writer.add_scalar.assert_called_once_with(
                "test", torch.ones(1), global_step=0
            ) 
Example #14
Source File: Summary.py    From VideoSuperResolution with MIT License
def __init__(self, log_path, key=None):
    if key is not None:
      self.key = hash(key)
    else:
      self.key = hash(str(log_path))
    self._logd = log_path
    self.writer = SummaryWriter(str(log_path))
    _writer_container[self.key] = self 
Example #15
Source File: tensorboard.py    From pytracking with GNU General Public License v3.0
def write_info(self, module_name, script_name, description):
        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))
        tb_info_writer.add_text('Module_name', module_name)
        tb_info_writer.add_text('Script_name', script_name)
        tb_info_writer.add_text('Description', description)
        tb_info_writer.close() 
Example #16
Source File: tensorboard.py    From pytracking with GNU General Public License v3.0
def __init__(self, directory, loader_names):
        self.directory = directory
        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names}) 
Example #17
Source File: logger.py    From DeepPrivacy with MIT License
def __init__(self, logdir, generated_data_dir):
        self.writer = SummaryWriter(
            os.path.join(logdir, "train")
        )
        self.validation_writer = SummaryWriter(
            os.path.join(logdir, "val"))
        self.global_step = 0
        self.image_dir = generated_data_dir

        os.makedirs(self.image_dir, exist_ok=True)
        os.makedirs(os.path.join(self.image_dir, "validation"), exist_ok=True)
        os.makedirs(os.path.join(self.image_dir, "transition"), exist_ok=True) 
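Writing the same tag through sibling writers makes TensorBoard treat "train" and "val" as separate runs, so their curves overlay on a single chart. A minimal sketch of the pattern with placeholder paths and values:

import os

from torch.utils.tensorboard import SummaryWriter

logdir = "runs/experiment"  # placeholder root directory
train_writer = SummaryWriter(os.path.join(logdir, "train"))
val_writer = SummaryWriter(os.path.join(logdir, "val"))
for step, (train_loss, val_loss) in enumerate([(0.9, 1.1), (0.5, 0.8)]):
    train_writer.add_scalar("loss", train_loss, step)  # same tag in both runs
    val_writer.add_scalar("loss", val_loss, step)
train_writer.close()
val_writer.close()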
Example #18
Source File: callbacks.py    From pytorch-tools with MIT License
def on_begin(self):
        os.makedirs(self.log_dir, exist_ok=True)
        self.writer = SummaryWriter(self.log_dir) 
Example #19
Source File: exp_utils.py    From RegRCNN with Apache License 2.0
def __init__(self, name, log_dir, server_env=True, fold="all", sysmetrics_interval=2):
        self.pylogger = logging.getLogger(name)
        self.tboard = SummaryWriter(log_dir=os.path.join(log_dir, "tboard"))
        self.times = {}
        self.log_dir = log_dir
        self.fold = str(fold)
        self.server_env = server_env

        self.pylogger.setLevel(logging.DEBUG)
        self.log_file = os.path.join(log_dir, "fold_"+self.fold, 'exec.log')
        os.makedirs(os.path.dirname(self.log_file), exist_ok=True)
        self.pylogger.addHandler(logging.FileHandler(self.log_file))
        if not server_env:
            self.pylogger.addHandler(ColorHandler())
        else:
            self.pylogger.addHandler(logging.StreamHandler())
        self.pylogger.propagate = False

        # monitor system metrics (cpu, mem, ...)
        if not server_env and sysmetrics_interval > 0:
            self.sysmetrics = pd.DataFrame(
                columns=["global_step", "rel_time", r"CPU (%)", "mem_used (GB)", r"mem_used (%)",
                         r"swap_used (GB)", r"gpu_utilization (%)"], dtype="float16")
            for device in range(torch.cuda.device_count()):
                self.sysmetrics[
                    "mem_allocd (GB) by torch on {:10s}".format(torch.cuda.get_device_name(device))] = np.nan
                self.sysmetrics[
                    "mem_cached (GB) by torch on {:10s}".format(torch.cuda.get_device_name(device))] = np.nan
            self.sysmetrics_start(sysmetrics_interval)
        else:
            print("NOT logging sysmetrics") 
Example #20
Source File: reporter.py    From espnet with Apache License 2.0
def tensorboard_add_scalar(self, summary_writer: SummaryWriter, epoch: int = None):
        if epoch is None:
            epoch = self.get_epoch()

        keys2 = set.union(*[set(self.get_keys2(k)) for k in self.get_keys()])
        for key2 in keys2:
            summary_writer.add_scalars(
                key2,
                {
                    k: self.stats[epoch][k][key2]
                    for k in self.get_keys(epoch)
                    if key2 in self.stats[epoch][k]
                },
                epoch,
            ) 
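add_scalars takes a main tag plus a dict of sub-tag values and draws one curve per key on a shared chart, which is how the reporter above compares its sub-reporters per epoch. A standalone sketch with placeholder numbers:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/add_scalars_demo")  # placeholder path
for epoch in range(3):
    # One "acc" chart with a train curve and a valid curve.
    writer.add_scalars(
        "acc", {"train": 0.80 + 0.05 * epoch, "valid": 0.75 + 0.04 * epoch},
        epoch)
writer.close()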
Example #21
Source File: monitor.py    From lsq-net with MIT License
def __init__(self, logger, log_dir):
        super(TensorBoardMonitor, self).__init__()
        self.writer = SummaryWriter(log_dir / 'tb_runs')
        logger.info('TensorBoard data directory: %s/tb_runs' % log_dir) 
Example #22
Source File: lamb.py    From fast-reid with Apache License 2.0
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
    """Log a histogram of trust ratio scalars in across layers."""
    results = collections.defaultdict(list)
    for group in optimizer.param_groups:
        for p in group['params']:
            state = optimizer.state[p]
            for i in ('weight_norm', 'adam_norm', 'trust_ratio'):
                if i in state:
                    results[i].append(state[i])

    for k, v in results.items():
        event_writer.add_histogram(f'lamb/{k}', torch.tensor(v), token_count) 
Example #23
Source File: events.py    From fast-reid with Apache License 2.0
def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
        """
        Args:
            log_dir (str): the directory to save the output events
            window_size (int): the scalars will be median-smoothed by this window size
            kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`
        """
        self._window_size = window_size
        from torch.utils.tensorboard import SummaryWriter

        self._writer = SummaryWriter(log_dir, **kwargs) 
Example #24
Source File: helpers.py    From joeynmt with Apache License 2.0
def store_attention_plots(attentions: np.array, targets: List[List[str]],
                          sources: List[List[str]],
                          output_prefix: str, indices: List[int],
                          tb_writer: Optional[SummaryWriter] = None,
                          steps: int = 0) -> None:
    """
    Saves attention plots.

    :param attentions: attention scores
    :param targets: list of tokenized targets
    :param sources: list of tokenized sources
    :param output_prefix: prefix for attention plots
    :param indices: indices selected for plotting
    :param tb_writer: Tensorboard summary writer (optional)
    :param steps: current training steps, needed for tb_writer
    """
    for i in indices:
        if i >= len(sources):
            continue
        plot_file = "{}.{}.pdf".format(output_prefix, i)
        src = sources[i]
        trg = targets[i]
        attention_scores = attentions[i].T
        try:
            fig = plot_heatmap(scores=attention_scores, column_labels=trg,
                               row_labels=src, output_path=plot_file,
                               dpi=100)
            if tb_writer is not None:
                # lower resolution for tensorboard
                fig = plot_heatmap(scores=attention_scores, column_labels=trg,
                                   row_labels=src, output_path=None, dpi=50)
                tb_writer.add_figure("attention/{}.".format(i), fig,
                                     global_step=steps)
        # pylint: disable=bare-except
        except:
            print("Couldn't plot example {}: src len {}, trg len {}, "
                  "attention scores shape {}".format(i, len(src), len(trg),
                                                     attention_scores.shape))
            continue 
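add_figure renders a matplotlib figure into an image summary (closing the figure by default), which is why the helper re-plots at a lower dpi just for TensorBoard. A minimal sketch independent of the joeynmt helpers, with a random stand-in for the attention matrix:

import matplotlib

matplotlib.use("Agg")  # headless backend for training servers
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/figures_demo")  # placeholder path
fig, ax = plt.subplots(dpi=50)  # low dpi keeps event files small
ax.imshow(np.random.rand(5, 8), aspect="auto")  # stand-in attention scores
writer.add_figure("attention/0", fig, global_step=0)
writer.close()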
Example #25
Source File: meter.py    From theconf with MIT License
def __init__(self, *keys, tensorboard_path=None, prefixs=['train', 'valid']):
        super(AverageMeter, self).__init__()
        self.step = 0
        self.keys = keys
        self.reset()

        self.tensorboard_path = tensorboard_path
        if tensorboard_path:
            self.writers = {prefix: SummaryWriter(os.path.join(tensorboard_path, prefix)) for prefix in prefixs}
        else:
            self.writers = {} 
Example #26
Source File: tensorboard_writer.py    From snorkel with Apache License 2.0
def cleanup(self) -> None:
        """Close the ``SummaryWriter``."""
        self.writer.close() 
Example #27
Source File: tensorboard_writer.py    From snorkel with Apache License 2.0
def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.writer = SummaryWriter(self.log_dir) 
Example #28
Source File: super.py    From jdit with Apache License 2.0
def graph_lazy(self, model: Union[torch.nn.Module, torch.nn.DataParallel, Model], name: str):
        if isinstance(model, torch.nn.Module):
            proto_model: torch.nn.Module = model
            num_params: int = self._count_params(proto_model)
        elif isinstance(model, torch.nn.DataParallel):
            proto_model: torch.nn.Module = model.module
            num_params: int = self._count_params(proto_model)
        elif isinstance(model, Model):
            proto_model: torch.nn.Module = model.model
            num_params: int = model.num_params
        else:
            raise TypeError("Only `nn.Module`, `nn.DataParallel` and `Model` can be passed!, got %s instead" % model)
        model_logdir = os.path.join(self.logdir, name)
        self._build_dir(model_logdir)
        self.scalars({'ParamsNum': num_params}, 0, tag=name)
        self.scalars({'ParamsNum': num_params}, 1, tag=name)

        def hook(model, layer_input, layer_output):
            writer_for_model = SummaryWriter(log_dir=model_logdir)
            input_for_test = tuple(i[0].detach().clone().unsqueeze(0) for i in layer_input)
            handle.remove()
            if isinstance(proto_model, torch.nn.DataParallel):
                writer_for_model.add_graph(proto_model.module, input_for_test)
            else:
                writer_for_model.add_graph(proto_model, input_for_test)
            writer_for_model.close()
            del writer_for_model

        handle = model.register_forward_hook(hook=hook) 
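The trick above is a forward hook that captures a real input batch, traces the graph once, and removes itself before add_graph re-runs the forward pass. A stripped-down sketch of the same pattern with a placeholder model and input:

import torch

from torch.utils.tensorboard import SummaryWriter

model = torch.nn.Linear(4, 2)  # placeholder model
writer = SummaryWriter("runs/graph_demo")  # placeholder path

def trace_once(module, layer_input, layer_output):
    handle.remove()  # one-shot: detach before add_graph re-runs forward
    example_input = tuple(t.detach().clone() for t in layer_input)
    writer.add_graph(module, example_input)
    writer.close()

handle = model.register_forward_hook(trace_once)
model(torch.ones(1, 4))  # the first real forward pass triggers the trace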
Example #29
Source File: super.py    From jdit with Apache License 2.0
def __init__(self, logdir: str):
        self.logdir = logdir
        self.writer = SummaryWriter(logdir)
        self._build_dir(logdir)
        self.training_progress_images = []
        self.gif_duration = 0.5
        self.handel = None 
Example #30
Source File: test_tensorboardX.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def test_swallowing_histogram_value_error(self):
        with TemporaryDirectory() as tmp_dir:
            writer = SummaryWriter(tmp_dir)
            with summary_writer_context(writer):
                SummaryWriterContext.add_histogram("bad_histogram", torch.ones(100, 1))