Python logger.Logger() Examples

The following are 20 code examples of logger.Logger(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module logger, or try the search function.
Example #1
Source File: train.py    From visual-interaction-networks-pytorch with MIT License 6 votes vote down vote up
def __init__(self, config, net):
        """Prepare training: data loaders, optimizer, logger, checkpointing.

        Args:
            config: experiment configuration (checkpoint_dir, batch_size,
                log_dir, load flag, ...).
            net: the network to train.
        """
        self.net = net
        self.config = config
        create_dir(self.config.checkpoint_dir)

        train_data = VinDataset(self.config, transform=ToTensor())
        eval_data = VinTestDataset(self.config, transform=ToTensorV2())
        self.dataloader = DataLoader(train_data,
                                     batch_size=self.config.batch_size,
                                     shuffle=True, num_workers=4)
        self.test_dataloader = DataLoader(eval_data, batch_size=1,
                                          shuffle=True, num_workers=1)

        self.optimizer = optim.Adam(self.net.parameters(), lr=0.0005)
        self.logger = Logger(self.config.log_dir)
        self.construct_cors()

        # Persist the initial state, then optionally restore a checkpoint.
        self.save()
        if config.load:
            self.load()
Example #2
Source File: eval_deception_score.py    From adaptive-style-transfer with GNU General Public License v3.0 6 votes vote down vote up
def run(extractor, classification_layer, images_df, batch_size=64, logger=None):
    """Classify stylized images and report accuracy (the "deception score").

    Args:
        extractor: feature extractor exposing .extract(paths, layers, ...).
        classification_layer: name of the layer holding class probabilities.
        images_df: DataFrame with 'image_path' and integer 'label' columns.
        batch_size: batch size forwarded to the extractor.
        logger: object with a .log(str) method. A fresh Logger is created
            when omitted. (The original used `logger=Logger()` as a default,
            which is evaluated once at import time and shared across calls.)

    Returns:
        (accuracy, num_correct, num_images); (-1, 0, 0) when images_df is empty.
    """
    if logger is None:
        logger = Logger()
    images_df = images_df.copy()
    if len(images_df) == 0:
        # print() is valid in both Python 2 and 3; the original used the
        # Python-2-only `print` statement.
        print('No images found!')
        return -1, 0, 0
    probs = extractor.extract(images_df['image_path'].values, [classification_layer],
                              verbose=1, batch_size=batch_size)
    # Predicted class = argmax over the classification layer's outputs.
    images_df['predicted_class'] = np.argmax(probs, axis=1).tolist()
    is_correct = images_df['label'] == images_df['predicted_class']
    accuracy = float(is_correct.sum()) / len(images_df)

    logger.log('Num images: {}'.format(len(images_df)))
    logger.log('Correctly classified: {}/{}'.format(is_correct.sum(), len(images_df)))
    logger.log('Accuracy: {:.5f}'.format(accuracy))
    logger.log('\n===')
    return accuracy, is_correct.sum(), len(images_df)

# image filenames must be in format "{content_name}_stylized_{artist_name}.jpg"
# uncomment methods which you want to evaluate and set the paths to the folders with the stylized images 
Example #3
Source File: trainer.py    From EvolveGCN with Apache License 2.0 6 votes vote down vote up
def __init__(self,args, splitter, gcn, classifier, comp_loss, dataset, num_classes):
		"""Store training components and set up the logger and optimizers.

		Args:
			args: experiment configuration namespace (also given to the logger).
			splitter: data splitter; its .tasker supplies adjacency/features.
			gcn: the graph network being trained.
			classifier: downstream classification head.
			comp_loss: loss function used during training.
			dataset: dataset object; .num_nodes is read here.
			num_classes: number of target classes.
		"""
		self.args = args
		self.splitter = splitter
		self.tasker = splitter.tasker
		self.gcn = gcn
		self.classifier = classifier
		self.comp_loss = comp_loss

		self.num_nodes = dataset.num_nodes
		self.data = dataset
		self.num_classes = num_classes

		# Metrics logger from the project-local `logger` module.
		self.logger = logger.Logger(args, self.num_classes)

		self.init_optimizers(args)

		# Static graphs: the single adjacency and node-feature tensors can be
		# prepared once up front instead of per time step.
		if self.tasker.is_static:
			adj_matrix = u.sparse_prepare_tensor(self.tasker.adj_matrix, torch_size = [self.num_nodes], ignore_batch_dim = False)
			self.hist_adj_list = [adj_matrix]
			self.hist_ndFeats_list = [self.tasker.nodes_feats.float()]
Example #4
Source File: gdqn.py    From KG-A2C with MIT License 5 votes vote down vote up
def configure_logger(log_dir):
    """Set up the module-global `tb` Logger and `log` function for log_dir."""
    logger.configure(log_dir, format_strs=['log'])
    global tb, log
    output_formats = [logger.make_output_format(fmt, log_dir)
                      for fmt in ('tensorboard', 'csv', 'stdout')]
    tb = logger.Logger(log_dir, output_formats)
    log = logger.log
Example #5
Source File: solver.py    From AUNets with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        """Attach a TensorBoard Logger writing to self.log_path."""
        from logger import Logger as TBLogger
        self.logger = TBLogger(self.log_path)

    # ====================================================================#
    # ====================================================================# 
Example #6
Source File: trainer.py    From causal-infogan with MIT License 5 votes vote down vote up
def configure_logger(self):
        """Create the Logger and point tensorboard logging at <out_dir>/log."""
        log_path = os.path.join(self.out_dir, "log")
        self.logger = Logger(log_path)
        configure(log_path, flush_secs=5)
Example #7
Source File: tester.py    From RL-GAN-Net with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        """Instantiate the TensorBoard logger for this run."""
        from logger import Logger as TBLogger
        self.logger = TBLogger(self.log_path)
Example #8
Source File: trainer.py    From RL-GAN-Net with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        """Create the TensorBoard logger under self.log_path."""
        from logger import Logger as _Logger
        self.logger = _Logger(self.log_path)
Example #9
Source File: train.py    From Python-Reinforcement-Learning-Projects with MIT License 5 votes vote down vote up
def run(self):
        """Run the TRPO training loop inside a fresh TensorFlow session.

        Per episode: sample trajectories in parallel, log their average
        reward, compute advantages, update the policy (TRPO) and value
        networks, and checkpoint every 10 episodes. Assumes the graph
        (summary_op, networks, samplers) was already built elsewhere.
        """
        with tf.Session() as sess:
            saver = tf.train.Saver()
            logger = Logger(sess=sess, directory=self.directory)
            self.value_network.set_session(sess)
            sess.run(tf.global_variables_initializer())

            for i in range(self.num_episodes):
                logger.set_step(step=i)
                # Generate simulation paths (sync policy params to workers first)
                self.parallel_sampler.update_policy_params(sess)
                paths = self.parallel_sampler.generate_paths(max_num_samples=self.sampler_max_samples)
                paths = self.parallel_sampler.truncate_paths(paths, max_num_samples=self.sampler_max_samples)
                # Compute the average reward of the sampled paths
                logger.add_summary(sess.run(self.summary_op, 
                                            feed_dict={self.average_reward: 
                                                       numpy.mean([path['total_reward'] for path in paths])}))
                # Calculate discounted cumulative rewards and advantages
                samples = self.sampler.process_paths(paths, self.value_network, self.discount, self.gae_lambda,
                                                     self.sampler_center_advantage, positive_advantage=False)
                # Update policy network
                self.trpo.optimize_policy(sess, samples, logger, subsample_rate=self.subsample_rate)
                # Update value network
                self.value_network.train(paths)
                # Save the model every 10 episodes
                if (i + 1) % 10 == 0:
                    saver.save(sess, os.path.join(self.directory, '{}.ckpt'.format(self.task)))
                # Print infos
                logger.flush() 
Example #10
Source File: solver.py    From SHN-based-2D-face-alignment with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger as TensorBoardLogger
        self.logger = TensorBoardLogger(self.log_dir)
Example #11
Source File: solver.py    From stargan with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger as _TBLogger
        self.logger = _TBLogger(self.log_dir)
Example #12
Source File: utils.py    From PIXOR with MIT License 5 votes vote down vote up
def get_logger(config, mode='train'):
    """Return a logger.Logger writing under logs/<config name>/<mode>.

    Args:
        config: mapping with a 'name' key identifying the experiment.
        mode: sub-directory name, e.g. 'train' or 'val'.
    """
    folder = os.path.join('logs', config['name'], mode)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() sequence.
    os.makedirs(folder, exist_ok=True)
    return logger.Logger(folder)
Example #13
Source File: core.py    From M2Det with MIT License 5 votes vote down vote up
def set_logger(status):
    """Return a Logger writing to a fresh timestamped ./logs dir, or None when disabled."""
    if not status:
        return None
    from logger import Logger
    log_path = './logs/' + time.strftime("%m_%d_%H_%M") + '_log'
    # Start every run with a clean directory.
    if os.path.exists(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    return Logger(log_path)
Example #14
Source File: solver.py    From adversarial-object-removal with MIT License 5 votes vote down vote up
def build_tensorboard(self):
        from logger import Logger
        self.logger = Logger(self.log_path) 
Example #15
Source File: sign.py    From ogb with MIT License 4 votes vote down vote up
def main():
    """Train and evaluate SIGN (precomputed propagation + MLP) on OGBN-Products."""
    parser = argparse.ArgumentParser(description='OGBN-Products (SIGN)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    data = SIGN(args.num_layers)(dataset[0])  # This might take a while.

    # SIGN stores the propagated features as data.x, data.x1, ..., data.xK;
    # gather each per data split and move them to the target device.
    xs = [data.x] + [data[f'x{i}'] for i in range(1, args.num_layers + 1)]
    xs_train = [x[split_idx['train']].to(device) for x in xs]
    xs_valid = [x[split_idx['valid']].to(device) for x in xs]
    xs_test = [x[split_idx['test']].to(device) for x in xs]

    y_train_true = data.y[split_idx['train']].to(device)
    y_valid_true = data.y[split_idx['valid']].to(device)
    y_test_true = data.y[split_idx['test']].to(device)

    model = MLP(data.x.size(-1), args.hidden_channels, dataset.num_classes, args.num_layers,
                args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh parameters and optimizer state for every independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, xs_train, y_train_true, optimizer)

            train_acc = test(model, xs_train, y_train_true, evaluator)
            valid_acc = test(model, xs_valid, y_valid_true, evaluator)
            test_acc = test(model, xs_test, y_test_true, evaluator)
            result = (train_acc, valid_acc, test_acc)
            logger.add_result(run, result)

            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')

        logger.print_statistics(run)
    logger.print_statistics() 
Example #16
Source File: mlp.py    From ogb with MIT License 4 votes vote down vote up
def main():
    """Train an MLP node classifier on OGBN-Products raw node features."""
    parser = argparse.ArgumentParser(description='OGBN-Products (MLP)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    x = data.x
    if args.use_node_embedding:
        # Optionally concatenate a precomputed node embedding loaded from
        # embedding.pt (produced by a separate script — TODO confirm source).
        embedding = torch.load('embedding.pt', map_location='cpu')
        x = torch.cat([x, embedding], dim=-1)
    x = x.to(device)

    y_true = data.y.to(device)
    train_idx = split_idx['train'].to(device)

    model = MLP(x.size(-1), args.hidden_channels, dataset.num_classes, args.num_layers,
                args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Re-initialize parameters and optimizer for each independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, x, y_true, train_idx, optimizer)
            result = test(model, x, y_true, split_idx, evaluator)
            logger.add_result(run, result)

            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')

        logger.print_statistics(run)
    logger.print_statistics() 
Example #17
Source File: mlp.py    From ogb with MIT License 4 votes vote down vote up
def main():
    """Train an MLP node classifier on OGBN-MAG using paper-node features only."""
    parser = argparse.ArgumentParser(description='OGBN-MAG (MLP)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-mag')
    split_idx = dataset.get_idx_split()
    data = dataset[0]
    print(data)

    # OGBN-MAG is heterogeneous; only the 'paper' node type is classified.
    x = data.x_dict['paper']
    if args.use_node_embedding:
        # Optionally concatenate a precomputed node embedding from embedding.pt.
        embedding = torch.load('embedding.pt', map_location='cpu')
        x = torch.cat([x, embedding], dim=-1)
    x = x.to(device)

    y_true = data.y_dict['paper'].to(device)
    train_idx = split_idx['train']['paper'].to(device)

    model = MLP(x.size(-1), args.hidden_channels, dataset.num_classes,
                args.num_layers, args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-mag')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh parameters and optimizer for each independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, x, y_true, train_idx, optimizer)
            result = test(model, x, y_true, split_idx, evaluator)
            logger.add_result(run, result)

            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')

        logger.print_statistics(run)
    logger.print_statistics() 
Example #18
Source File: mlp.py    From ogb with MIT License 4 votes vote down vote up
def main():
    """Train an MLP node classifier on OGBN-Arxiv raw node features."""
    parser = argparse.ArgumentParser(description='OGBN-Arxiv (MLP)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-arxiv')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    x = data.x
    if args.use_node_embedding:
        # Optionally concatenate a precomputed node embedding from embedding.pt.
        embedding = torch.load('embedding.pt', map_location='cpu')
        x = torch.cat([x, embedding], dim=-1)
    x = x.to(device)

    y_true = data.y.to(device)
    train_idx = split_idx['train'].to(device)

    model = MLP(x.size(-1), args.hidden_channels, dataset.num_classes,
                args.num_layers, args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-arxiv')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh parameters and optimizer for each independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, x, y_true, train_idx, optimizer)
            result = test(model, x, y_true, split_idx, evaluator)
            logger.add_result(run, result)

            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}%, '
                      f'Test: {100 * test_acc:.2f}%')

        logger.print_statistics(run)
    logger.print_statistics() 
Example #19
Source File: mlp.py    From ogb with MIT License 4 votes vote down vote up
def main():
    """Train an MLP on OGBN-Proteins using mean-aggregated edge features per node."""
    parser = argparse.ArgumentParser(description='OGBN-Proteins (MLP)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_node_embedding', action='store_true')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--eval_steps', type=int, default=5)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)

    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    dataset = PygNodePropPredDataset(name='ogbn-proteins')
    split_idx = dataset.get_idx_split()
    data = dataset[0]

    # Build node features by mean-pooling the attributes of each node's
    # outgoing edges (data.edge_index[0] gives the source node per edge).
    x = scatter(data.edge_attr, data.edge_index[0], dim=0,
                dim_size=data.num_nodes, reduce='mean').to('cpu')

    if args.use_node_embedding:
        # Optionally concatenate a precomputed node embedding from embedding.pt.
        embedding = torch.load('embedding.pt', map_location='cpu')
        x = torch.cat([x, embedding], dim=-1)

    x = x.to(device)
    y_true = data.y.to(device)
    train_idx = split_idx['train'].to(device)

    # 112 outputs — the label dimension of ogbn-proteins (multi-label task).
    model = MLP(x.size(-1), args.hidden_channels, 112, args.num_layers,
                args.dropout).to(device)

    evaluator = Evaluator(name='ogbn-proteins')
    logger = Logger(args.runs, args)

    for run in range(args.runs):
        # Fresh parameters and optimizer for each independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, x, y_true, train_idx, optimizer)

            # Evaluation is expensive, so only run it every eval_steps epochs.
            if epoch % args.eval_steps == 0:
                result = test(model, x, y_true, split_idx, evaluator)
                logger.add_result(run, result)

                if epoch % args.log_steps == 0:
                    train_rocauc, valid_rocauc, test_rocauc = result
                    # Fixed: the original omitted the comma after the Valid
                    # field, unlike the sibling OGB example scripts.
                    print(f'Run: {run + 1:02d}, '
                          f'Epoch: {epoch:02d}, '
                          f'Loss: {loss:.4f}, '
                          f'Train: {100 * train_rocauc:.2f}%, '
                          f'Valid: {100 * valid_rocauc:.2f}%, '
                          f'Test: {100 * test_rocauc:.2f}%')

        logger.print_statistics(run)
    logger.print_statistics()
Example #20
Source File: train.py    From Visual-Template-Free-Form-Parsing with GNU General Public License v3.0 4 votes vote down vote up
def main(config, resume):
    """Build model/loss/metrics from a config dict and run training.

    Args:
        config: dict of experiment settings ('name', 'arch', 'model', 'loss',
            'metrics', 'trainer', optional 'split').
        resume: checkpoint path/flag forwarded to the trainer for resuming.
    """
    set_procname(config['name'])
    #np.random.seed(1234) I don't have a way of restarting the DataLoader at the same place, so this makes it totally random
    train_logger = Logger()

    split = config['split'] if 'split' in config else 'train'
    data_loader, valid_data_loader = getDataLoader(config,split)
    #valid_data_loader = data_loader.split_validation()

    # NOTE(review): eval() on config strings executes arbitrary code; this is
    # only acceptable because configs are trusted local files — do not feed
    # untrusted input here.
    model = eval(config['arch'])(config['model'])
    model.summary()
    # Loss may be a single expression or a dict of named losses.
    if type(config['loss'])==dict:
        loss={}#[eval(l) for l in config['loss']]
        for name,l in config['loss'].items():
            loss[name]=eval(l)
    else:
        loss = eval(config['loss'])
    # Metrics likewise: either a flat list or a dict of named metric lists.
    if type(config['metrics'])==dict:
        metrics={}
        for name,m in config['metrics'].items():
            metrics[name]=[eval(metric) for metric in m]
    else:
        metrics = [eval(metric) for metric in config['metrics']]

    # The config may select a custom Trainer subclass by name.
    if 'class' in config['trainer']:
        trainerClass = eval(config['trainer']['class'])
    else:
        trainerClass = Trainer
    trainer = trainerClass(model, loss, metrics,
                      resume=resume,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      train_logger=train_logger)

    # Save a checkpoint on Ctrl-C before exiting.
    def handleSIGINT(sig, frame):
        trainer.save()
        sys.exit(0)
    signal.signal(signal.SIGINT, handleSIGINT)

    print("Begin training")
    trainer.train()