Python utils.logging.Logger() Examples

The following are 6 code examples of utils.logging.Logger(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils.logging, or try the search function.
Example #1
Source File: training_sda.py    From d-SNE with Apache License 2.0 6 votes vote down vote up
def create_logger(self):
    """Build the file logger and, optionally, the mxboard summary writer.

    Returns:
        Tuple ``(logger, sw)``. Outside of training mode both are ``None``;
        ``sw`` is also ``None`` when mxboard logging is disabled.
    """
    if not self.args.training:
        return None, None

    # Wipe any stale log only when starting from scratch (epoch 0).
    logger = Logger(self.args.log, '%s-%s' % (self.args.method, self.args.postfix),
                    rm_exist=self.args.start_epoch == 0)
    logger.update_dict(vars(self.args))

    sw = None
    if self.args.mxboard:
        # Imported lazily so mxboard is only required when actually used.
        from mxboard import SummaryWriter
        sw = SummaryWriter(logdir=self.args.log)

    return logger, sw
Example #2
Source File: run.py    From graph_distillation with Apache License 2.0 5 votes vote down vote up
def train(opt, model, dataloader):
  """Run the training loop: log progress, save checkpoints, decay the LR."""
  # Logging
  log = logging.Logger(opt.ckpt_path, opt.split)
  tracker = logging.Statistics(opt.ckpt_path, opt.split)
  log.log(opt)

  model.load(opt.load_ckpt_paths, opt.load_opts, opt.load_epoch)
  total_steps = len(dataloader)
  for epoch in range(1, opt.n_epochs + 1):
    for step, batch in enumerate(dataloader, 1):
      # Each batch is (inputs, label, _); inputs is a list, one per modality.
      inputs, label, _ = batch
      result = model.train(inputs, label)
      summary = tracker.update(len(label), result)
      if utils.is_due(step, opt.print_every):
        utils.info('epoch {}/{}, step {}/{}: {}'.format(
            epoch, opt.n_epochs, step, total_steps, summary))

    log.log('[Summary] epoch {}/{}: {}'.format(epoch, opt.n_epochs,
                                               tracker.summarize()))

    # Periodic checkpointing (also fires on the final epoch).
    if utils.is_due(epoch, opt.n_epochs, opt.save_every):
      model.save(epoch)
      tracker.save()
      log.log('***** saved *****')

    if utils.is_due(epoch, opt.lr_decay_at):
      log.log('***** lr decay *****: {}'.format(model.lr_decay()))
Example #3
Source File: run.py    From graph_distillation with Apache License 2.0 5 votes vote down vote up
def test(opt, model, dataloader):
  """Evaluate the model, then compute mAP at several IoU thresholds."""
  # Logging
  log = logging.Logger(opt.ckpt_path, opt.split)
  tracker = logging.Statistics(opt.ckpt_path, opt.split)
  log.log(opt)

  model.load(opt.load_ckpt_paths, opt.load_opts, opt.load_epoch)
  all_scores = []
  video_names = []
  total_steps = len(dataloader)
  for step, batch in enumerate(dataloader, 1):
    inputs, label, vid_name = batch
    info_acc, logits, scores = model.test(inputs, label, opt.timestep)

    all_scores.append(scores)
    # vid_name[0]: one video name per step — TODO confirm batch size is 1.
    video_names.append(vid_name[0])
    summary = tracker.update(logits.shape[0], info_acc)
    if utils.is_due(step, opt.print_every):
      utils.info('step {}/{}: {}'.format(step, total_steps, summary))

  log.log('[Summary] {}'.format(tracker.summarize()))

  # Evaluate detection mAP against the ground-truth annotations.
  iou_thresholds = [0.1, 0.3, 0.5]
  groundtruth_dir = os.path.join(opt.dset_path, opt.dset, 'groundtruth',
                                 'validation/cross-subject')
  assert os.path.exists(groundtruth_dir), '{} does not exist'.format(groundtruth_dir)
  mean_aps = calc_map(opt, all_scores, video_names, groundtruth_dir, iou_thresholds)

  for i, threshold in enumerate(iou_thresholds):
    log.log('IoU: {}, mAP: {}'.format(threshold, mean_aps[i]))
Example #4
Source File: run.py    From graph_distillation with Apache License 2.0 5 votes vote down vote up
def train(opt, model, dataloader):
  """Train the model, logging progress and checkpointing periodically."""
  # Logging
  log = logging.Logger(opt.ckpt_path, opt.split)
  tracker = logging.Statistics(opt.ckpt_path, opt.split)
  log.log(opt)

  model.load(opt.load_ckpt_paths, opt.load_epoch)
  total_steps = len(dataloader)
  for epoch in range(1, opt.n_epochs + 1):
    for step, batch in enumerate(dataloader, 1):
      result = model.train(*batch)
      # batch[-1] is the label tensor; its size(0) is the batch size.
      summary = tracker.update(batch[-1].size(0), result)
      if utils.is_due(step, opt.print_every):
        utils.info('epoch {}/{}, step {}/{}: {}'.format(
            epoch, opt.n_epochs, step, total_steps, summary))

    log.log('[Summary] epoch {}/{}: {}'.format(epoch, opt.n_epochs,
                                               tracker.summarize()))

    # Periodic checkpointing (also fires on the final epoch).
    if utils.is_due(epoch, opt.n_epochs, opt.save_every):
      model.save(epoch)
      log.log('***** saved *****')

    if utils.is_due(epoch, opt.lr_decay_at):
      log.log('***** lr decay *****: {}'.format(model.lr_decay()))
Example #5
Source File: run.py    From graph_distillation with Apache License 2.0 5 votes vote down vote up
def test(opt, model, dataloader):
  """Test the model and report accuracy and mAP over the whole split.

  Args:
    opt: options namespace (checkpoint paths, split name, print frequency).
    model: model exposing load() and test().
    dataloader: iterable yielding (inputs, label) batches.
  """
  # Logging. FIX: the logger previously wrote to opt.load_ckpt_path while
  # the statistics tracker used opt.ckpt_path; both now consistently use
  # opt.ckpt_path, matching the train() counterpart.
  logger = logging.Logger(opt.ckpt_path, opt.split)
  stats = logging.Statistics(opt.ckpt_path, opt.split)
  logger.log(opt)

  logits, labels = [], []
  model.load(opt.load_ckpt_paths, opt.load_epoch)
  for step, data in enumerate(dataloader, 1):
    inputs, label = data
    info_acc, logit = model.test(inputs, label)
    logits.append(utils.to_numpy(logit.squeeze(0)))
    labels.append(utils.to_numpy(label))
    update = stats.update(label.size(0), info_acc)
    if utils.is_due(step, opt.print_every):
      utils.info('step {}/{}: {}'.format(step, len(dataloader), update))

  logits = np.concatenate(logits, axis=0)
  length, n_classes = logits.shape
  labels = np.concatenate(labels)
  scores = utils.softmax(logits, axis=1)

  # Accuracy
  preds = np.argmax(scores, axis=1)
  acc = np.sum(preds == labels) / length
  # Average precision: one-hot encode the labels, compute per-class AP,
  # and drop classes absent from this split (their AP comes back as NaN).
  y_true = np.zeros((length, n_classes))
  y_true[np.arange(length), labels] = 1
  aps = average_precision_score(y_true, scores, average=None)
  aps = list(filter(lambda x: not np.isnan(x), aps))
  mAP = np.mean(aps)

  logger.log('[Summary]: {}'.format(stats.summarize()))
  logger.log('Acc: {}, mAP: {}'.format(acc, mAP))
Example #6
Source File: run.py    From pymarl with Apache License 2.0 4 votes vote down vote up
def run(_run, _config, _log):
    """Entry point: validate the config, wire up logging, train, then exit."""
    # check args sanity
    _config = args_sanity_check(_config, _log)

    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"

    # setup loggers
    logger = Logger(_log)

    _log.info("Experiment Parameters:")
    params_str = pprint.pformat(_config, indent=4, width=1)
    _log.info("\n\n" + params_str + "\n")

    # configure tensorboard logger
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    unique_token = "{}__{}".format(args.name, timestamp)
    args.unique_token = unique_token
    if args.use_tensorboard:
        results_root = dirname(dirname(abspath(__file__)))
        tb_logs_direc = os.path.join(results_root, "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)

    # sacred is on by default
    logger.setup_sacred(_run)

    # Run and train
    run_sequential(args=args, logger=logger)

    # Clean up after finishing
    print("Exiting Main")

    print("Stopping all threads")
    for thread in threading.enumerate():
        if thread.name == "MainThread":
            continue
        print("Thread {} is alive! Is daemon: {}".format(thread.name, thread.daemon))
        thread.join(timeout=1)
        print("Thread joined")

    print("Exiting script")

    # Making sure framework really exits
    os._exit(os.EX_OK)