Python wandb.init() Examples

The following are 30 code examples of wandb.init(), drawn from open-source projects. Each example lists its original project, source file, and license. Note that a few snippets matched on the substring "init" and show related initialization code instead, such as torch.nn.init weight-initialization helpers from the same repositories and one neptune.init() test fixture.
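Before the project-specific examples, here is a minimal, self-contained sketch of the common call pattern; the project name, run name, and config values are illustrative placeholders:

import wandb

# start a run; project, name, and config here are placeholders
run = wandb.init(project="my-project", name="baseline",
                 config={"lr": 1e-3, "epochs": 10})

for epoch in range(run.config.epochs):
    wandb.log({"epoch": epoch, "loss": 1.0 / (epoch + 1)})  # dummy metric

run.finish()  # recent releases; older versions used wandb.join(), as in Example #9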
Example #1
Source File: wandb.py    From pytorch-lightning with Apache License 2.0
def experiment(self) -> Run:
        r"""

        Actual wandb object. To use wandb features in your
        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.

        Example::

            self.logger.experiment.some_wandb_function()

        """
        if self._experiment is None:
            if self._offline:
                os.environ['WANDB_MODE'] = 'dryrun'
            self._experiment = wandb.init(
                name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
                reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,
                group=self._group)
            # save checkpoints in wandb dir to upload on W&B servers
            if self._log_model:
                self.save_dir = self._experiment.dir
        return self._experiment 
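This experiment property belongs to pytorch-lightning's WandbLogger. A hedged sketch of how it is typically wired into training follows; the run name, project, and Trainer arguments are illustrative, and the API is that of the pytorch-lightning release the snippet comes from:

import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger

# offline=True takes the WANDB_MODE='dryrun' branch shown above
logger = WandbLogger(name="demo-run", project="demo-project", offline=True)
trainer = pl.Trainer(logger=logger, max_epochs=1)
# inside a LightningModule, wandb features are reached through the property:
#     self.logger.experiment.log({"custom_metric": 0.5})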
Example #2
Source File: ppo2_continuous_action.py    From cleanrl with MIT License
def __init__(self):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(np.array(env.observation_space.shape).prod(), 120)
        self.fc2 = nn.Linear(120, 84)
        self.mean = nn.Linear(84, np.prod(env.action_space.shape))
        self.logstd = nn.Parameter(torch.zeros(1, np.prod(env.action_space.shape)))

        if args.pol_layer_norm:
            self.ln1 = torch.nn.LayerNorm(120)
            self.ln2 = torch.nn.LayerNorm(84)
        if args.weights_init == "orthogonal":
            torch.nn.init.orthogonal_(self.fc1.weight)
            torch.nn.init.orthogonal_(self.fc2.weight)
            torch.nn.init.orthogonal_(self.mean.weight)
        elif args.weights_init == "xavier":
            torch.nn.init.xavier_uniform_(self.fc1.weight)
            torch.nn.init.xavier_uniform_(self.fc2.weight)
            torch.nn.init.xavier_uniform_(self.mean.weight)
        else:
            raise NotImplementedError 
Example #3
Source File: ppo2.py    From cleanrl with MIT License
def __init__(self):
        super(Value, self).__init__()
        self.fc1 = nn.Linear(np.array(env.observation_space.shape).prod(), 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 1)

        if args.weights_init == "orthogonal":
            torch.nn.init.orthogonal_(self.fc1.weight)
            torch.nn.init.orthogonal_(self.fc2.weight)
            torch.nn.init.orthogonal_(self.fc3.weight)
        elif args.weights_init == "xavier":
            torch.nn.init.xavier_uniform_(self.fc1.weight)
            torch.nn.init.xavier_uniform_(self.fc2.weight)
            torch.nn.init.xavier_uniform_(self.fc3.weight)
        else:
            raise NotImplementedError 
Example #4
Source File: ppo3_continuous_action.py    From cleanrl with MIT License
def __init__(self):
        super(Value, self).__init__()
        self.fc1 = nn.Linear(input_shape, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 1)

        if args.weights_init == "orthogonal":
            torch.nn.init.orthogonal_(self.fc1.weight)
            torch.nn.init.orthogonal_(self.fc2.weight)
            torch.nn.init.orthogonal_(self.fc3.weight)
        elif args.weights_init == "xavier":
            torch.nn.init.xavier_uniform_(self.fc1.weight)
            torch.nn.init.xavier_uniform_(self.fc2.weight)
            torch.nn.init.xavier_uniform_(self.fc3.weight)
        else:
            raise NotImplementedError 
Example #5
Source File: ppo3_continuous_action.py    From cleanrl with MIT License
def __init__(self):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(input_shape, 120)
        self.fc2 = nn.Linear(120, 84)
        self.mean = nn.Linear(84, output_shape)
        self.logstd = nn.Parameter(torch.zeros(1, output_shape))

        if args.pol_layer_norm:
            self.ln1 = torch.nn.LayerNorm(120)
            self.ln2 = torch.nn.LayerNorm(84)
        if args.weights_init == "orthogonal":
            torch.nn.init.orthogonal_(self.fc1.weight)
            torch.nn.init.orthogonal_(self.fc2.weight)
            torch.nn.init.orthogonal_(self.mean.weight)
        elif args.weights_init == "xavier":
            torch.nn.init.xavier_uniform_(self.fc1.weight)
            torch.nn.init.xavier_uniform_(self.fc2.weight)
            torch.nn.init.xavier_uniform_(self.mean.weight)
        else:
            raise NotImplementedError 
Example #6
Source File: callbacks.py    From keras-rl with MIT License
def __init__(self, **kwargs):
        kwargs = {
            'project': 'keras-rl',
            'anonymous': 'allow',
            **kwargs
        }
        wandb.init(**kwargs)
        self.episode_start = {}
        self.observations = {}
        self.rewards = {}
        self.actions = {}
        self.metrics = {}
        self.step = 0 
Example #7
Source File: test_logging.py    From skorch with BSD 3-Clause "New" or "Revised" License
def wandb_run_cls(self):
        import wandb
        os.environ['WANDB_MODE'] = 'dryrun' # run offline
        with wandb.init(anonymous="allow") as run:
            return run 
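Setting WANDB_MODE before wandb.init(), as this fixture does, keeps the run fully offline so tests never touch the network. 'dryrun' is the legacy value; newer releases also accept 'offline', and offline runs can be uploaded later:

import os

os.environ["WANDB_MODE"] = "dryrun"    # legacy spelling, as used above
# newer releases: os.environ["WANDB_MODE"] = "offline"
# upload a recorded run afterwards from the shell: wandb sync <run-directory>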
Example #8
Source File: __init__.py    From rtrl with MIT License
def run_wandb(entity, project, run_id, run_cls: type = Training, checkpoint_path: str = None):
  """run and save config and stats to https://wandb.com"""
  wandb_dir = mkdtemp()  # prevent wandb from polluting the home directory
  atexit.register(shutil.rmtree, wandb_dir, ignore_errors=True)  # clean up after wandb atexit handler finishes
  import wandb
  config = partial_to_dict(run_cls)
  config['seed'] = config['seed'] or randrange(1, 1000000)  # if seed == 0 replace with random
  config['environ'] = log_environment_variables()
  config['git'] = git_info()
  resume = checkpoint_path and exists(checkpoint_path)
  wandb.init(dir=wandb_dir, entity=entity, project=project, id=run_id, resume=resume, config=config)
  for stats in iterate_episodes(run_cls, checkpoint_path):
    for s in stats:
      wandb.log(json.loads(s.to_json()))
Example #9
Source File: sweeps.py    From simpletransformers with Apache License 2.0
def train():
    # Initialize a new wandb run
    wandb.init()

    # Create a TransformerModel
    model = ClassificationModel("roberta", "roberta-base", use_cuda=True, args=model_args, sweep_config=wandb.config,)

    # Train the model
    model.train_model(train_df, eval_df=eval_df)

    # Evaluate the model
    model.eval_model(eval_df)

    # Sync wandb
    wandb.join() 
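The argument-free wandb.init() in this train() function is deliberate: when launched by a sweep agent, the run inherits the sweep's hyperparameters through wandb.config. A minimal sketch of the surrounding sweep setup, where the method, metric, and parameter ranges are illustrative:

import wandb

sweep_config = {
    "method": "bayes",  # alternatives: "grid", "random"
    "metric": {"name": "eval_loss", "goal": "minimize"},
    "parameters": {"learning_rate": {"min": 1e-5, "max": 1e-3}},
}
sweep_id = wandb.sweep(sweep_config, project="sweep-demo")
wandb.agent(sweep_id, function=train)  # runs train() once per trial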
Example #10
Source File: visualization.py    From tape with BSD 3-Clause "New" or "Revised" License
def __init__(self, log_dir: typing.Union[str, Path], exp_name: str, debug: bool = False):
        if not WANDB_FOUND:
            raise ImportError("wandb module not available")
        if debug:
            os.environ['WANDB_MODE'] = 'dryrun'
        if 'WANDB_PROJECT' not in os.environ:
            # Want the user to set the WANDB_PROJECT.
            logger.warning("WANDB_PROJECT environment variable not found, "
                           "not logging to app.wandb.ai")
            os.environ['WANDB_MODE'] = 'dryrun'
        wandb.init(dir=log_dir, name=exp_name) 
Example #11
Source File: wandb_logger.py    From packnet-sfm with MIT License
def create_experiment(self):
        """Creates and returns a new experiment"""
        experiment = wandb.init(
            name=self._name, dir=self._dir, project=self._project,
            anonymous=self._anonymous, reinit=True, id=self._id,
            resume='allow', tags=self._tags, entity=self._entity
        )
        wandb.run.save()
        return experiment 
Example #12
Source File: callbacks.py    From NeMo with Apache License 2.0
def on_action_start(self, state):
        if state["global_rank"] is None or state["global_rank"] == 0:
            if _WANDB_AVAILABLE and wandb.run is None:
                wandb.init(name=self._name, project=self._project)
                if self._args is not None:
                    wandb.config.update(self._args)
            elif _WANDB_AVAILABLE and wandb.run is not None:
                logging.info("Re-using wandb session")
            else:
                logging.error("Could not import wandb. Did you install it (pip install --upgrade wandb)?")
                logging.info("Will not log data to weights and biases.")
                self._step_freq = -1 
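The _WANDB_AVAILABLE flag tested above is typically set once at import time with a guard like the following (a sketch; NeMo's actual guard may differ in detail):

try:
    import wandb
    _WANDB_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    wandb = None
    _WANDB_AVAILABLE = False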
Example #13
Source File: deprecated_callbacks.py    From NeMo with Apache License 2.0
def on_action_start(self):
        if self.global_rank is None or self.global_rank == 0:
            if self._wandb_name is not None or self._wandb_project is not None:
                if _WANDB_AVAILABLE and wandb.run is None:
                    wandb.init(name=self._wandb_name, project=self._wandb_project)
                elif _WANDB_AVAILABLE and wandb.run is not None:
                    logging.info("Re-using wandb session")
                else:
                    logging.error("Could not import wandb. Did you install it (pip install --upgrade wandb)?")
                    logging.info("Will not log data to weights and biases.")
                    self._wandb_name = None
                    self._wandb_project = None 
Example #14
Source File: deprecated_callbacks.py    From NeMo with Apache License 2.0
def on_action_start(self):
        if self.global_rank is None or self.global_rank == 0:
            if _WANDB_AVAILABLE and wandb.run is None:
                wandb.init(name=self._name, project=self._project)
                if self._args is not None:
                    logging.info('init wandb session and append args')
                    wandb.config.update(self._args)
            elif _WANDB_AVAILABLE and wandb.run is not None:
                logging.info("Re-using wandb session")
            else:
                logging.error("Could not import wandb. Did you install it (pip install --upgrade wandb)?")
                logging.info("Will not log data to weights and biases.")
                self._update_freq = -1 
Example #15
Source File: wandb_logger.py    From catalyst with Apache License 2.0
def on_stage_start(self, runner: IRunner):
        """Initialize Weights & Biases."""
        wandb.init(**self.logging_params, reinit=True, dir=str(runner.logdir))
        wandb.watch(
            models=runner.model, criterion=runner.criterion, log=self.log
        ) 
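Besides metrics, wandb.watch() hooks the model so gradient and parameter histograms are recorded as training proceeds. A standalone sketch, where the model and logging frequency are illustrative:

import torch.nn as nn
import wandb

wandb.init(project="watch-demo")
model = nn.Linear(10, 1)
# log="gradients" is the default; "parameters" and "all" are also accepted
wandb.watch(model, log="gradients", log_freq=100)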
Example #16
Source File: wandb_logger.py    From ignite with BSD 3-Clause "New" or "Revised" License
def __init__(self, *args, **kwargs):
        try:
            import wandb

            self._wandb = wandb
        except ImportError:
            raise RuntimeError(
                "This contrib module requires wandb to be installed. "
                "You man install wandb with the command:\n pip install wandb\n"
            )
        if kwargs.get("init", True):
            wandb.init(*args, **kwargs) 
Example #17
Source File: logger.py    From RLcycle with MIT License
def _initialize_wandb(self):
        """Initialize WandB logging."""
        time_info = datetime.now()
        timestamp = f"{time_info.year}-{time_info.month}-{time_info.day}"
        wandb.init(
            project=f"RLcycle-{self.experiment_cfg.experiment_info.env.name}",
            name=f"{self.experiment_cfg.experiment_info.experiment_name}/{timestamp}",
        )
        wandb.config.update(OmegaConf.to_container(self.experiment_cfg)) 
Example #18
Source File: test_logging.py    From skorch with BSD 3-Clause "New" or "Revised" License
def neptune_experiment_cls(self):
        import neptune
        neptune.init(project_qualified_name="tests/dry-run",
                     backend=neptune.OfflineBackend())
        return neptune.create_experiment 
Example #19
Source File: wandb.py    From ludwig with Apache License 2.0
def train_init(self, experiment_directory, experiment_name, model_name,
                   resume, output_directory):
        import wandb
        logger.info("wandb.train_init() called...")
        wandb.init(project=os.getenv("WANDB_PROJECT", experiment_name),
                   name=model_name, sync_tensorboard=True, dir=output_directory)
        wandb.save(os.path.join(experiment_directory, "*")) 
Example #20
Source File: distributed_logger.py    From rl_algorithms with MIT License
def set_wandb(self):
        """Set configuration for wandb logging."""
        wandb.init(
            project=self.env_info.name,
            name=f"{self.log_cfg.agent}/{self.log_cfg.curr_time}",
        )
        wandb.config.update(vars(self.args))
        shutil.copy(self.args.cfg_path, os.path.join(wandb.run.dir, "config.py")) 
Example #21
Source File: agent.py    From rl_algorithms with MIT License
def set_wandb(self):
        """Set configuration for wandb logging."""
        wandb.init(
            project=self.log_cfg.env_name,
            name=f"{self.log_cfg.agent}/{self.log_cfg.curr_time}",
        )
        wandb.config.update(vars(self.args))
        wandb.config.update(self.hyper_params)
        shutil.copy(self.args.cfg_path, os.path.join(wandb.run.dir, "config.py")) 
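The shutil.copy into wandb.run.dir works because wandb uploads that directory's contents when the run finishes. wandb.save() is the explicit alternative and accepts glob patterns; the path below is illustrative:

import wandb

wandb.init(project="save-demo")
wandb.save("checkpoints/*.pt")  # registers matching files for upload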
Example #22
Source File: c51_atari_visual.py    From cleanrl with MIT License
def reset_parameters(self):
        nn.init.constant_(self.weight, 0.0)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.0) 
Example #23
Source File: ppo_atari_visual.py    From cleanrl with MIT License
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer 
Example #24
Source File: ppo_self_play.py    From cleanrl with MIT License
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer 
Example #25
Source File: dqn_atari_visual.py    From cleanrl with MIT License
def reset_parameters(self):
        nn.init.constant_(self.weight, 0.0)
        if self.bias is not None:
            nn.init.constant_(self.bias, 0.0) 
Example #26
Source File: ppo_continuous_action.py    From cleanrl with MIT License
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer 
Example #27
Source File: ppo_atari.py    From cleanrl with MIT License
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer 
Example #28
Source File: ppo.py    From cleanrl with MIT License
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    torch.nn.init.orthogonal_(layer.weight, std)
    torch.nn.init.constant_(layer.bias, bias_const)
    return layer 
Example #29
Source File: sac_continuous_action.py    From cleanrl with MIT License
def layer_init(layer, weight_gain=1, bias_const=0):
    if isinstance(layer, nn.Linear):
        if args.weights_init == "xavier":
            torch.nn.init.xavier_uniform_(layer.weight, gain=weight_gain)
        elif args.weights_init == "orthogonal":
            torch.nn.init.orthogonal_(layer.weight, gain=weight_gain)
        if args.bias_init == "zeros":
            torch.nn.init.constant_(layer.bias, bias_const) 
Example #30
Source File: writer.py    From pytorch-project-template with Apache License 2.0
def __init__(self, hp, logdir):
        self.hp = hp
        if hp.log.use_tensorboard:
            self.tensorboard = SummaryWriter(logdir)
        if hp.log.use_wandb:
            wandb_init_conf = hp.log.wandb_init_conf.to_dict()
            wandb_init_conf["config"] = hp.to_dict()
            wandb.init(**wandb_init_conf)
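Whatever is passed under the config key, as in Example #30, comes back as wandb.config, giving later code a single source of hyperparameters. A short sketch, with illustrative keys and values:

import wandb

wandb.init(project="config-demo", config={"batch_size": 32, "lr": 3e-4})
batch_size = wandb.config.batch_size  # dict keys become attributes
wandb.config.update({"optimizer": "adam"}, allow_val_change=True)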