Python torch.manual_seed() Examples
The following are 30 code examples of torch.manual_seed(), drawn from open-source projects. Each example notes the source file and project it comes from, along with the project's license. You may also want to check out the other available functions and classes of the torch module.
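As a quick orientation before the project code, here is a minimal stand-alone sketch (not taken from any of the projects below) of what torch.manual_seed() guarantees: seeding the default generator makes subsequent PyTorch random draws reproducible, and the call returns the seeded torch.Generator.

import torch

# Re-seeding with the same value replays the same random stream.
torch.manual_seed(0)
a = torch.randn(3)

torch.manual_seed(0)
b = torch.randn(3)

assert torch.equal(a, b)

# manual_seed() returns the default generator, so the call can be chained.
gen = torch.manual_seed(123)
print(gen.initial_seed())  # 123

Note that recent PyTorch releases also seed the CUDA generators from torch.manual_seed(), but many of the projects below call torch.cuda.manual_seed_all() explicitly, and additionally seed Python's random module and NumPy, since torch.manual_seed() does not cover those.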
Example #1
Source File: train.py From mmdetection with Apache License 2.0
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
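For reference, a typical call at the top of a training script might look like the line below (the seed value is hypothetical); passing deterministic=True trades CUDNN autotuning speed for run-to-run reproducibility.

set_random_seed(42, deterministic=True)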
Example #2
Source File: train_val.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default')
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value],
                            'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value],
                            'lr': lr,
                            'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    # self.valwriter = tb.writer.FileWriter(self.tbvaldir)
    return lr, self.optimizer
Example #3
Source File: test_sparse_adam.py From geoopt with Apache License 2.0
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianAdam([start], **params)

    for _ in range(2000):
        optim.step(closure)

    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5)
Example #4
Source File: test_adam.py From geoopt with Apache License 2.0
def test_adam_lorentz(params):
    lorentz = geoopt.manifolds.Lorentz(k=torch.Tensor([1.0]))
    torch.manual_seed(42)
    with torch.no_grad():
        X = geoopt.ManifoldParameter(torch.randn(20, 10), manifold=lorentz).proj_()
    Xstar = torch.randn(20, 10)
    Xstar.set_(lorentz.projx(Xstar))

    def closure():
        optim.zero_grad()
        loss = (Xstar - X).pow(2).sum()
        loss.backward()
        return loss.item()

    optim = geoopt.optim.RiemannianAdam([X], stabilize=4500, **params)

    for _ in range(10000):
        if (Xstar - X).norm() < 1e-5:
            break
        optim.step(closure)

    assert X.is_contiguous()
    np.testing.assert_allclose(X.data, Xstar, atol=1e-5, rtol=1e-5)
    optim.load_state_dict(optim.state_dict())
    optim.step(closure)
Example #5
Source File: test_adam.py From geoopt with Apache License 2.0
def test_adam_poincare():
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = torch.tensor([0.5, 0.5])
    start = torch.randn(2) / 2
    start = manifold.expmap0(start)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        optim.zero_grad()
        loss = manifold.dist(start, ideal) ** 2
        loss.backward()
        return loss.item()

    optim = geoopt.optim.RiemannianAdam([start], lr=1e-2)

    for _ in range(2000):
        optim.step(closure)

    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5)
Example #6
Source File: test_sparse_rsgd.py From geoopt with Apache License 2.0
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianSGD([start], **params)

    for _ in range(2000):
        optim.step(closure)

    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5)
Example #7
Source File: test_rsgd.py From geoopt with Apache License 2.0
def test_init_manifold():
    torch.manual_seed(42)
    stiefel = geoopt.manifolds.Stiefel()
    rn = geoopt.manifolds.Euclidean()
    x0 = torch.randn(10, 10)
    x1 = torch.randn(10, 10)
    with torch.no_grad():
        p0 = geoopt.ManifoldParameter(x0, manifold=stiefel).proj_()
    p1 = geoopt.ManifoldParameter(x1, manifold=rn)
    p0.grad = torch.zeros_like(p0)
    p1.grad = torch.zeros_like(p1)
    p0old = p0.clone()
    p1old = p1.clone()
    opt = geoopt.optim.RiemannianSGD([p0, p1], lr=1, stabilize=1)
    opt.zero_grad()
    opt.step()
    assert not np.allclose(p0.data, p0old.data)
    assert p0.is_contiguous()
    np.testing.assert_allclose(p1.data, p1old.data)
    np.testing.assert_allclose(p0.data, stiefel.projx(p0old.data), atol=1e-4)
Example #8
Source File: images.py From nsf with MIT License
def eval_on_test(batch_size, num_workers, seed, _log):
    torch.manual_seed(seed)
    np.random.seed(seed)

    device = set_device()

    test_dataset, (c, h, w) = get_test_data()
    _log.info('Test dataset size: {}'.format(len(test_dataset)))
    _log.info('Image dimensions: {}x{}x{}'.format(c, h, w))

    flow = create_flow(c, h, w).to(device)
    flow.eval()

    def log_prob_fn(batch):
        return flow.log_prob(batch.to(device))

    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             num_workers=num_workers)
    test_loader = tqdm(test_loader)

    mean, err = autils.eval_log_density_2(log_prob_fn=log_prob_fn,
                                          data_loader=test_loader,
                                          c=c, h=h, w=w)
    print('Test log probability (bits/dim): {:.2f} +/- {:.4f}'.format(mean, err))
Example #9
Source File: utils.py From tpu_pretrain with Apache License 2.0
def init(args):
    # init logger
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is not None and args.log_file != "":
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file,
                            filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    else:
        logging.basicConfig(level=logging.INFO, format=log_format)

    # create output dir
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    assert 'bert' in args.output_dir.name, \
        '''Output dir name has to contain `bert` or `roberta`
        for AutoModel.from_pretrained to correctly infer the model type'''
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
Example #10
Source File: images.py From nsf with MIT License
def main(seed, _log):
    torch.manual_seed(seed)
    np.random.seed(seed)

    device = set_device()

    train_dataset, val_dataset, (c, h, w) = get_train_valid_data()
    _log.info('Training dataset size: {}'.format(len(train_dataset)))
    if val_dataset is None:
        _log.info('No validation dataset')
    else:
        _log.info('Validation dataset size: {}'.format(len(val_dataset)))
    _log.info('Image dimensions: {}x{}x{}'.format(c, h, w))

    flow = create_flow(c, h, w)
    train_flow(flow, train_dataset, val_dataset, (c, h, w), device)
Example #11
Source File: images.py From nsf with MIT License
def sample(seed, num_bits, num_samples, samples_per_row, _log, output_path=None):
    torch.set_grad_enabled(False)

    if output_path is None:
        output_path = 'samples.png'

    torch.manual_seed(seed)
    np.random.seed(seed)

    device = set_device()

    _, _, (c, h, w) = get_train_valid_data()

    flow = create_flow(c, h, w).to(device)
    flow.eval()

    preprocess = Preprocess(num_bits)

    samples = flow.sample(num_samples)
    samples = preprocess.inverse(samples)

    save_image(samples.cpu(), output_path, nrow=samples_per_row, padding=0)
Example #12
Source File: dcgan.py From Pytorch-Project-Template with MIT License
def load_checkpoint(self, file_name):
    filename = self.config.checkpoint_dir + file_name
    try:
        self.logger.info("Loading checkpoint '{}'".format(filename))
        checkpoint = torch.load(filename)

        self.current_epoch = checkpoint['epoch']
        self.current_iteration = checkpoint['iteration']
        self.netG.load_state_dict(checkpoint['G_state_dict'])
        self.optimG.load_state_dict(checkpoint['G_optimizer'])
        self.netD.load_state_dict(checkpoint['D_state_dict'])
        self.optimD.load_state_dict(checkpoint['D_optimizer'])
        self.fixed_noise = checkpoint['fixed_noise']
        self.manual_seed = checkpoint['manual_seed']

        self.logger.info("Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
                         .format(self.config.checkpoint_dir, checkpoint['epoch'], checkpoint['iteration']))
    except OSError as e:
        self.logger.info("No checkpoint exists from '{}'. Skipping...".format(self.config.checkpoint_dir))
        self.logger.info("**First time to train**")
Example #13
Source File: depthest_main.py From ACAN with MIT License
def main():
    args = Parameters().parse()
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    # Dataset
    datasets = create_datasets(args)
    # Network
    net = create_network(args)
    # Loss Function
    criterion = create_lossfunc(args, net)
    # optimizer and parameters
    optim_params = create_params(args, net)
    optimizer = create_optimizer(args, optim_params)
    # learning rate scheduler
    scheduler = create_scheduler(args, optimizer, datasets)
    if args.mode == 'train':
        train(args, net, datasets, criterion, optimizer, scheduler)
        return
    if args.mode == 'test':
        test(args, net, datasets)
        return
Example #14
Source File: embedding.py From Hash-Embeddings with MIT License
def reset_parameters(self,
                     init_shared=lambda x: normal(x, std=0.1),
                     init_importance=lambda x: normal(x, std=0.0005)):
    """Resets the trainable parameters."""
    def set_constant_row(parameters, iRow=0, value=0):
        """Return `parameters` with row `iRow` as a constant `value`."""
        data = parameters.data
        data[iRow, :] = value
        return torch.nn.Parameter(data, requires_grad=parameters.requires_grad)

    np.random.seed(self.seed)
    if self.seed is not None:
        torch.manual_seed(self.seed)

    self.shared_embeddings.weight = init_shared(self.shared_embeddings.weight)
    self.importance_weights.weight = init_importance(self.importance_weights.weight)

    if self.padding_idx is not None:
        # Unfortunately has to set weight to 0 even when paddingIdx = 0
        self.shared_embeddings.weight = set_constant_row(self.shared_embeddings.weight)
        self.importance_weights.weight = set_constant_row(self.importance_weights.weight)

    self.shared_embeddings.weight.requires_grad = self.train_sharedEmbed
    self.importance_weights.weight.requires_grad = self.train_weight
Example #15
Source File: distributed_slurm_main.py From pytorch-distributed with MIT License
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        # torch.backends.cudnn.enabled = False
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    args.local_rank = int(os.environ["SLURM_PROCID"])
    args.world_size = int(os.environ["SLURM_NPROCS"])
    ngpus_per_node = torch.cuda.device_count()

    job_id = os.environ["SLURM_JOBID"]
    args.dist_url = "file://{}.{}".format(os.path.realpath(args.dist_file), job_id)
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
Example #16
Source File: CBP.py From fast-MPN-COV with MIT License
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
    super(CBP, self).__init__()
    self.thresh = thresh
    self.projDim = projDim
    self.input_dim = input_dim
    self.output_dim = projDim
    torch.manual_seed(1)
    self.h_ = [
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long),
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long)
    ]
    self.weights_ = [
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float(),
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
    ]

    indices1 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[0].reshape(1, -1)), dim=0)
    indices2 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[1].reshape(1, -1)), dim=0)

    self.sparseM = [
        torch.sparse.FloatTensor(indices1, self.weights_[0],
                                 torch.Size([self.input_dim, self.output_dim])).to_dense(),
        torch.sparse.FloatTensor(indices2, self.weights_[1],
                                 torch.Size([self.input_dim, self.output_dim])).to_dense(),
    ]
Example #17
Source File: horovod_distributed.py From pytorch-distributed with MIT License
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    hvd.init()
    local_rank = hvd.local_rank()
    torch.cuda.set_device(local_rank)

    main_worker(local_rank, 4, args)
Example #18
Source File: Sequicity.py From ConvLab with MIT License
def __init__(self, archive_file=DEFAULT_ARCHIVE_FILE, model_file=None):
    SysPolicy.__init__(self)

    if not os.path.isfile(archive_file):
        if not model_file:
            raise Exception("No model for Sequicity is specified!")
        archive_file = cached_path(model_file)

    model_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(model_dir, 'data')):
        archive = zipfile.ZipFile(archive_file, 'r')
        archive.extractall(model_dir)

    cfg.init_handler('tsdf-multiwoz')

    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)
    random.seed(cfg.seed)
    np.random.seed(cfg.seed)

    self.m = Model('multiwoz')
    self.m.count_params()
    self.m.load_model()
    self.reset()
Example #19
Source File: test_lorentz_math.py From geoopt with Apache License 2.0
def seed(request):
    seed = request.param
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    return seed
Example #20
Source File: apex_distributed.py From pytorch-distributed with MIT License
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    main_worker(args.local_rank, 4, args)
Example #21
Source File: images.py From nsf with MIT License
def plot_data(num_bits, num_samples, samples_per_row, seed):
    torch.manual_seed(seed)
    np.random.seed(seed)

    train_dataset, _, _ = get_train_valid_data()
    samples = torch.cat([train_dataset[i][0]
                         for i in np.random.randint(0, len(train_dataset), num_samples)])
    samples = Preprocess(num_bits).inverse(samples)

    save_image(samples.cpu(), 'samples.png', nrow=samples_per_row, padding=0)
Example #22
Source File: distributed.py From pytorch-distributed with MIT License
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    main_worker(args.local_rank, 4, args)
Example #23
Source File: dataparallel.py From pytorch-distributed with MIT License
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    gpus = [0, 1, 2, 3]
    main_worker(gpus=gpus, args=args)
Example #24
Source File: clone.py From derplearning with MIT License
def train(config, experiment_path, gpu):
    device = torch.device('cuda:' + gpu if torch.cuda.is_available() else 'cpu')
    model_fn = eval('derp.model.' + config['train']['model'])
    criterion = eval('torch.nn.' + config['train']['criterion'])().to(device)
    optimizer_fn = eval('torch.optim.' + config['train']['optimizer'])
    scheduler_fn = torch.optim.lr_scheduler.ReduceLROnPlateau
    dim_in = np.array([config['thumb'][x] for x in ['depth', 'height', 'width']])

    # Prepare transforms
    transformer = derp.model.compose_transforms(config['train']['transforms'])
    train_fetcher = Fetcher(experiment_path / 'train', transformer, config['predict'])
    assert len(train_fetcher)
    test_fetcher = Fetcher(experiment_path / 'test', transformer, config['predict'])
    assert len(test_fetcher)
    train_loader = DataLoader(train_fetcher, config['train']['batch_size'],
                              shuffle=True, num_workers=3)
    test_loader = DataLoader(test_fetcher, config['train']['batch_size'], num_workers=3)
    print('Train Loader: %6i' % len(train_loader.dataset))
    print('Test Loader: %6i' % len(test_loader.dataset))

    np.random.seed(config['seed'])
    torch.manual_seed(config['seed'])
    n_status = len(config['status'])
    n_predict = len(config['predict'])
    model = model_fn(dim_in, n_status, n_predict).to(device)
    optimizer = optimizer_fn(model.parameters(), config['train']['learning_rate'])
    scheduler = scheduler_fn(optimizer, factor=0.1, verbose=True, patience=8)

    loss_threshold = derp.model.test_epoch(device, model, criterion, test_loader)
    print('initial loss: %.6f' % loss_threshold)
    for epoch in range(config['train']['epochs']):
        start_time = time.time()
        train_loss = derp.model.train_epoch(device, model, optimizer, criterion, train_loader)
        test_loss = derp.model.test_epoch(device, model, criterion, test_loader)
        scheduler.step(test_loss)
        note = ''
        if test_loss < loss_threshold:
            loss_threshold = test_loss
            torch.save(model, str(experiment_path / 'model.pt'))
            note = 'saved'
        duration = time.time() - start_time
        print('Epoch %5i %.6f %.6f %.1fs %s' % (epoch, train_loss, test_loss, duration, note))
Example #25
Source File: utils.py From pytorch_sac_ae with MIT License
def set_seed_everywhere(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
Example #26
Source File: util.py From ConvLab with MIT License
def set_random_seed(spec):
    '''Generate and set random seed for relevant modules, and record it in spec.meta.random_seed'''
    torch.set_num_threads(1)  # prevent multithread slowdown, set again for hogwild
    trial = spec['meta']['trial']
    session = spec['meta']['session']
    random_seed = int(1e5 * (trial or 0) + 1e3 * (session or 0) + time.time())
    torch.cuda.manual_seed_all(random_seed)
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    spec['meta']['random_seed'] = random_seed
    return random_seed
Example #27
Source File: utils.py From ConvLab with MIT License
def set_seed(seed):
    """Sets random seed everywhere."""
    th.manual_seed(seed)
    if th.cuda.is_available():
        th.cuda.manual_seed(seed)
    np.random.seed(seed)
Example #28
Source File: test_rhmc.py From geoopt with Apache License 2.0
def test_leapfrog_reversibility(params):
    class NormalDist(torch.nn.Module):
        def __init__(self, mu, sigma):
            super().__init__()
            self.d = torch.distributions.Normal(mu, sigma)
            self.x = torch.nn.Parameter(torch.randn_like(mu))

        def forward(self):
            return self.d.log_prob(self.x).sum()

    epsilon, n_steps = params["epsilon"], params["n_steps"]
    torch.manual_seed(42)
    nd = NormalDist(torch.randn([10]), torch.ones([10]))
    init_x = nd.x.data.numpy().copy()

    torch.manual_seed(42)
    sampler = geoopt.samplers.rhmc.RHMC(nd.parameters(), **params)
    r = torch.randn([10])

    for i in range(n_steps):
        logp = nd()
        logp.backward()
        with torch.no_grad():
            sampler._step(nd.x, r, epsilon)
        nd.x.grad.zero_()

    for i in range(n_steps):
        logp = nd()
        logp.backward()
        with torch.no_grad():
            sampler._step(nd.x, r, -epsilon)
        nd.x.grad.zero_()

    new_x = nd.x.data.numpy().copy()
    np.testing.assert_allclose(init_x, new_x, rtol=1e-5)
Example #29
Source File: test_rhmc.py From geoopt with Apache License 2.0
def test_sampling_manifold(params):
    # should just work (all business stuff is checked above)
    class NormalDist(torch.nn.Module):
        def __init__(self, mu, sigma):
            super().__init__()
            self.d = torch.distributions.Normal(mu, sigma)
            self.x = geoopt.ManifoldParameter(
                torch.randn_like(mu), manifold=geoopt.Stiefel()
            )

        def forward(self):
            return self.d.log_prob(self.x).sum()

    torch.manual_seed(42)
    D = (5, 4)
    n_burn, n_samples = params.pop("n_burn"), params.pop("n_samples")
    nd = params.pop("nd")  # type: tuple
    mu = torch.randn(nd + D)
    sigma = torch.randn(nd + D).abs()
    nd = NormalDist(mu, sigma)
    Sampler = getattr(geoopt.samplers, params.pop("sampler"))
    sampler = Sampler(nd.parameters(), **params)

    for _ in range(n_burn):
        sampler.step(nd)

    points = []
    sampler.burnin = False

    for _ in range(n_samples):
        sampler.step(nd)
        points.append(nd.x.detach().numpy().copy())
Example #30
Source File: env.py From AerialDetection with Apache License 2.0
def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)