Python torch.nn.MSELoss() Examples
The following are 30 code examples of torch.nn.MSELoss().
You can go to the original project or source file noted above each example.
You may also want to check out all available functions and classes of the module torch.nn.
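Before the project examples, here is a minimal, self-contained sketch of the basic API (illustrative only, not taken from any project below):

import torch
import torch.nn as nn

# MSELoss computes the mean squared error between a prediction and a target of the same shape.
criterion = nn.MSELoss()               # default reduction='mean'
prediction = torch.randn(8, 3, requires_grad=True)
target = torch.randn(8, 3)

loss = criterion(prediction, target)   # scalar tensor
loss.backward()                        # gradients flow back into `prediction`
print(loss.item())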
Example #1
Source File: base_agent.py From 2D-Motion-Retargeting with MIT License
def __init__(self, config, net):
    self.log_dir = config.log_dir
    self.model_dir = config.model_dir
    self.net = net
    self.clock = TrainClock()
    self.device = config.device
    self.use_triplet = config.use_triplet
    self.use_footvel_loss = config.use_footvel_loss

    # set loss function
    self.mse = nn.MSELoss()
    self.tripletloss = nn.TripletMarginLoss(margin=config.triplet_margin)
    self.triplet_weight = config.triplet_weight
    self.foot_idx = config.foot_idx
    self.footvel_loss_weight = config.footvel_loss_weight

    # set optimizer
    self.optimizer = optim.Adam(self.net.parameters(), config.lr)
    self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, 0.99)
Example #2
Source File: nets.py From e2e-model-learning with Apache License 2.0
def run_rmse_net(model, variables, X_train, Y_train):
    opt = optim.Adam(model.parameters(), lr=1e-3)

    for i in range(1000):
        opt.zero_grad()
        model.train()
        train_loss = nn.MSELoss()(
            model(variables['X_train_'])[0], variables['Y_train_'])
        train_loss.backward()
        opt.step()

        model.eval()
        test_loss = nn.MSELoss()(
            model(variables['X_test_'])[0], variables['Y_test_'])

        print(i, train_loss.data[0], test_loss.data[0])

    model.eval()
    model.set_sig(variables['X_train_'], variables['Y_train_'])

    return model
Example #3
Source File: networks.py From Recycle-GAN with MIT License
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
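The GANLoss class body itself is not reproduced in this snippet. As a rough sketch of the idea the comment describes, an LSGAN-style wrapper might expand a scalar label to the prediction's shape before applying MSELoss (the names and structure below are assumptions for illustration, not the project's code):

import torch
import torch.nn as nn

class SimpleLSGANLoss(nn.Module):
    """Illustrative only: wraps MSELoss and builds a target tensor shaped like the input."""

    def __init__(self, target_real_label=1.0, target_fake_label=0.0):
        super().__init__()
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.loss = nn.MSELoss()

    def forward(self, prediction, target_is_real):
        # Expand the scalar label to the same shape as the discriminator output.
        label = self.real_label if target_is_real else self.fake_label
        return self.loss(prediction, label.expand_as(prediction))

Example #5 below shows a fuller variant of this pattern from another project.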
Example #4
Source File: losses.py From pase with MIT License
def __init__(self, discriminator, d_optimizer, size_average=True,
             loss='L2', batch_acum=1, device='cpu'):
    super().__init__()
    self.discriminator = discriminator
    self.d_optimizer = d_optimizer
    self.batch_acum = batch_acum
    if loss == 'L2':
        self.loss = nn.MSELoss(size_average)
        self.labels = [1, -1, 0]
    elif loss == 'BCE':
        self.loss = nn.BCEWithLogitsLoss()
        self.labels = [1, 0, 1]
    elif loss == 'Hinge':
        self.loss = None
    else:
        raise ValueError('Urecognized loss: {}'.format(loss))
    self.device = device
Example #5
Source File: networks.py From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
    """ Initialize the GANLoss class.

    Parameters:
        gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
        target_real_label (bool) - - label for a real image
        target_fake_label (bool) - - label of a fake image

    Note: Do not use sigmoid as the last layer of Discriminator.
    LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
    """
    super(GANLoss, self).__init__()
    self.register_buffer('real_label', torch.tensor(target_real_label))
    self.register_buffer('fake_label', torch.tensor(target_fake_label))
    self.gan_mode = gan_mode
    if gan_mode == 'lsgan':
        self.loss = nn.MSELoss()
    elif gan_mode == 'vanilla':
        self.loss = nn.BCEWithLogitsLoss()
    elif gan_mode in ['wgangp']:
        self.loss = None
    else:
        raise NotImplementedError('gan mode %s not implemented' % gan_mode)
Example #6
Source File: svg.py From pytorchrl with MIT License
def train_qf(self, expected_qval, obs_val, actions_val):
    """
    """
    obs = Variable(torch.from_numpy(obs_val)).type(
        torch.FloatTensor)
    actions = Variable(torch.from_numpy(actions_val)).type(
        torch.FloatTensor)
    expected_q = Variable(torch.from_numpy(expected_qval)).type(
        torch.FloatTensor)

    q_vals = self.qf(obs, actions)

    # Define loss function
    loss_fn = nn.MSELoss()
    loss = loss_fn(q_vals, expected_q)

    # Backpropagation and gradient descent
    self.qf_optimizer.zero_grad()
    loss.backward()
    self.qf_optimizer.step()

    return loss.data.numpy()
Example #7
Source File: networks.py From DepthNets with MIT License
def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)


##############################################################################
# Classes
##############################################################################


# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
Example #8
Source File: capsnet.py From Pytorch-CapsuleNet with MIT License
def __init__(self, config=None):
    super(CapsNet, self).__init__()
    if config:
        self.conv_layer = ConvLayer(config.cnn_in_channels, config.cnn_out_channels, config.cnn_kernel_size)
        self.primary_capsules = PrimaryCaps(config.pc_num_capsules, config.pc_in_channels,
                                            config.pc_out_channels, config.pc_kernel_size, config.pc_num_routes)
        self.digit_capsules = DigitCaps(config.dc_num_capsules, config.dc_num_routes,
                                        config.dc_in_channels, config.dc_out_channels)
        self.decoder = Decoder(config.input_width, config.input_height, config.cnn_in_channels)
    else:
        self.conv_layer = ConvLayer()
        self.primary_capsules = PrimaryCaps()
        self.digit_capsules = DigitCaps()
        self.decoder = Decoder()

    self.mse_loss = nn.MSELoss()
Example #9
Source File: loss.py From KAIR with MIT License
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
    super(GANLoss, self).__init__()
    self.gan_type = gan_type.lower()
    self.real_label_val = real_label_val
    self.fake_label_val = fake_label_val

    if self.gan_type == 'gan' or self.gan_type == 'ragan':
        self.loss = nn.BCEWithLogitsLoss()
    elif self.gan_type == 'lsgan':
        self.loss = nn.MSELoss()
    elif self.gan_type == 'wgan-gp':

        def wgan_loss(input, target):
            # target is boolean
            return -1 * input.mean() if target else input.mean()

        self.loss = wgan_loss
    else:
        raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
Example #10
Source File: model_plain2.py From KAIR with MIT License
def define_loss(self):
    G_lossfn_type = self.opt_train['G_lossfn_type']
    if G_lossfn_type == 'l1':
        self.G_lossfn = nn.L1Loss().to(self.device)
    elif G_lossfn_type == 'l2':
        self.G_lossfn = nn.MSELoss().to(self.device)
    elif G_lossfn_type == 'l2sum':
        self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
    elif G_lossfn_type == 'ssim':
        self.G_lossfn = SSIMLoss().to(self.device)
    else:
        raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
    self.G_lossfn_weight = self.opt_train['G_lossfn_weight']

# ----------------------------------------
# define optimizer
# ----------------------------------------
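The 'l2' and 'l2sum' branches differ only in the reduction: nn.MSELoss() averages the squared errors over all elements, while reduction='sum' adds them up, which changes the loss scale (and effective learning rate) with batch and image size. A tiny standalone illustration, not taken from KAIR:

import torch
import torch.nn as nn

pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.zeros(3)

mean_loss = nn.MSELoss()(pred, target)                # (1 + 4 + 9) / 3 = 4.6667
sum_loss = nn.MSELoss(reduction='sum')(pred, target)  # 1 + 4 + 9 = 14.0
print(mean_loss.item(), sum_loss.item())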
Example #11
Source File: SDAE.py From DCC with MIT License
def __init__(self, dim, dropout=0.2, slope=0.0):
    super(SDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim)-1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    self.dropmodule1 = nn.Dropout(p=dropout)
    self.dropmodule2 = nn.Dropout(p=dropout)
    self.loss = nn.MSELoss(size_average=True)

    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias.data is not None:
                init.constant(m.bias, 0)
Example #12
Source File: model_plain4.py From KAIR with MIT License
def define_loss(self):
    G_lossfn_type = self.opt_train['G_lossfn_type']
    if G_lossfn_type == 'l1':
        self.G_lossfn = nn.L1Loss().to(self.device)
    elif G_lossfn_type == 'l2':
        self.G_lossfn = nn.MSELoss().to(self.device)
    elif G_lossfn_type == 'l2sum':
        self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
    elif G_lossfn_type == 'ssim':
        self.G_lossfn = SSIMLoss().to(self.device)
    else:
        raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
    self.G_lossfn_weight = self.opt_train['G_lossfn_weight']

# ----------------------------------------
# define optimizer
# ----------------------------------------
Example #13
Source File: model_plain.py From KAIR with MIT License
def define_loss(self):
    G_lossfn_type = self.opt_train['G_lossfn_type']
    if G_lossfn_type == 'l1':
        self.G_lossfn = nn.L1Loss().to(self.device)
    elif G_lossfn_type == 'l2':
        self.G_lossfn = nn.MSELoss().to(self.device)
    elif G_lossfn_type == 'l2sum':
        self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
    elif G_lossfn_type == 'ssim':
        self.G_lossfn = SSIMLoss().to(self.device)
    else:
        raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
    self.G_lossfn_weight = self.opt_train['G_lossfn_weight']

# ----------------------------------------
# define optimizer
# ----------------------------------------
Example #14
Source File: prediction_head.py From FARM with Apache License 2.0
def __init__(
    self,
    layer_dims=[768, 1],
    task_name="regression",
    **kwargs,
):
    super(RegressionHead, self).__init__()
    # num_labels could in most cases also be automatically retrieved from the data processor
    self.layer_dims = layer_dims
    self.feed_forward = FeedForwardBlock(self.layer_dims)
    # num_labels is being set to 2 since it is being hijacked to store the scaling factor and the mean
    self.num_labels = 2
    self.ph_output_type = "per_sequence_continuous"
    self.model_type = "regression"
    self.loss_fct = MSELoss(reduction="none")
    self.task_name = task_name
    self.generate_config()
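Here MSELoss(reduction="none") returns one squared error per element rather than a scalar, which lets the head rescale or reduce the per-sample losses itself. A minimal standalone illustration of that behavior (not FARM code):

import torch
from torch.nn import MSELoss

preds = torch.tensor([[0.5], [1.2], [3.0]])
labels = torch.tensor([[0.0], [1.0], [2.0]])

per_sample = MSELoss(reduction="none")(preds, labels)  # shape (3, 1): one value per prediction
loss = per_sample.mean()                               # reduce manually, e.g. after reweighting
print(per_sample.squeeze().tolist(), loss.item())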
Example #15
Source File: model_test.py From pytorch-project-template with Apache License 2.0
def test_save_load_state(self):
    local_net = Net_arch(self.hp)
    self.loss_f = nn.MSELoss()
    local_model = Model(self.hp, local_net, self.loss_f)

    self.model.save_training_state(self.logger)
    save_filename = "%s_%d.state" % (self.hp.log.name, self.model.step)
    save_path = os.path.join(self.hp.log.chkpt_dir, save_filename)
    self.hp.load.resume_state_path = save_path
    assert os.path.exists(save_path) and os.path.isfile(save_path)
    assert os.path.exists(self.hp.log.log_file_path) and os.path.isfile(
        self.hp.log.log_file_path
    )

    local_model.load_training_state(logger=self.logger)
    parameters = zip(
        list(local_model.net.parameters()), list(self.model.net.parameters())
    )
    for load, origin in parameters:
        assert (load == origin).all()
    assert local_model.epoch == self.model.epoch
    assert local_model.step == self.model.step
Example #16
Source File: model_test.py From pytorch-project-template with Apache License 2.0
def test_save_load_network(self):
    local_net = Net_arch(self.hp)
    self.loss_f = nn.MSELoss()
    local_model = Model(self.hp, local_net, self.loss_f)

    self.model.save_network(self.logger)
    save_filename = "%s_%d.pt" % (self.hp.log.name, self.model.step)
    save_path = os.path.join(self.hp.log.chkpt_dir, save_filename)
    self.hp.load.network_chkpt_path = save_path
    assert os.path.exists(save_path) and os.path.isfile(save_path)
    assert os.path.exists(self.hp.log.log_file_path) and os.path.isfile(
        self.hp.log.log_file_path
    )

    local_model.load_network(logger=self.logger)
    parameters = zip(
        list(local_model.net.parameters()), list(self.model.net.parameters())
    )
    for load, origin in parameters:
        assert (load == origin).all()
Example #17
Source File: BPDA.py From DeepRobust with MIT License
def BPDA_attack(image, target, model, step_size=1., iterations=10,
                linf=False, transform_func=identity_transform):
    target = label2tensor(target)
    adv = image.detach().numpy()
    adv = torch.from_numpy(adv)
    adv.requires_grad_()
    for _ in range(iterations):
        adv_def = transform_func(adv)
        adv_def.requires_grad_()
        l2 = nn.MSELoss()
        loss = l2(0, adv_def)
        loss.backward()
        g = get_cw_grad(adv_def, image, target, model)
        if linf:
            g = torch.sign(g)
        print(g.numpy().sum())
        adv = adv.detach().numpy() - step_size * g.numpy()
        adv = clip_bound(adv)
        adv = torch.from_numpy(adv)
        adv.requires_grad_()
        if linf:
            print('label', torch.argmax(model(adv)), 'linf',
                  torch.max(torch.abs(adv - image)).detach().numpy())
        else:
            print('label', torch.argmax(model(adv)), 'l2', l2_norm(adv, image))
    return adv.detach().numpy()
Example #18
Source File: pytorch_train_aclstm.py From Auto_Conditioned_RNN_motion with MIT License
def calculate_loss(self, out_seq, groundtruth_seq):
    loss_function = nn.MSELoss()
    loss = loss_function(out_seq, groundtruth_seq)
    return loss

# numpy array real_seq_np: batch*seq_len*frame_size
Example #19
Source File: noise2noise.py From noise2noise-pytorch with MIT License
def _compile(self):
    """Compiles model (architecture, loss function, optimizers, etc.)."""

    print('Noise2Noise: Learning Image Restoration without Clean Data (Lethinen et al., 2018)')

    # Model (3x3=9 channels for Monte Carlo since it uses 3 HDR buffers)
    if self.p.noise_type == 'mc':
        self.is_mc = True
        self.model = UNet(in_channels=9)
    else:
        self.is_mc = False
        self.model = UNet(in_channels=3)

    # Set optimizer and loss, if in training mode
    if self.trainable:
        self.optim = Adam(self.model.parameters(),
                          lr=self.p.learning_rate,
                          betas=self.p.adam[:2],
                          eps=self.p.adam[2])

        # Learning rate adjustment
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim,
                                                        patience=self.p.nb_epochs/4, factor=0.5, verbose=True)

        # Loss function
        if self.p.loss == 'hdr':
            assert self.is_mc, 'Using HDR loss on non Monte Carlo images'
            self.loss = HDRLoss()
        elif self.p.loss == 'l2':
            self.loss = nn.MSELoss()
        else:
            self.loss = nn.L1Loss()

    # CUDA support
    self.use_cuda = torch.cuda.is_available() and self.p.cuda
    if self.use_cuda:
        self.model = self.model.cuda()
        if self.trainable:
            self.loss = self.loss.cuda()
Example #20
Source File: MSELoss.py From sentence-transformers with Apache License 2.0
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
    rep = self.model(sentence_features[0])['sentence_embedding']
    loss_fct = nn.MSELoss()
    loss = loss_fct(rep, labels)
    return loss
Example #21
Source File: MSELoss.py From sentence-transformers with Apache License 2.0
def __init__(self, model):
    super(MSELoss, self).__init__()
    self.model = model
Example #22
Source File: CosineSimilarityLoss.py From sentence-transformers with Apache License 2.0
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
    reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
    rep_a, rep_b = reps
    output = torch.cosine_similarity(rep_a, rep_b)
    loss_fct = nn.MSELoss()

    if labels is not None:
        loss = loss_fct(output, labels.view(-1))
        return loss
    else:
        return reps, output
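For context, losses like this are normally handed to the library's fit() method together with a DataLoader of InputExample pairs. A rough usage sketch based on the sentence-transformers documentation (details such as the model name may differ across versions):

from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
    InputExample(texts=['A man is eating food.', 'Someone is eating.'], label=0.9),
    InputExample(texts=['A man is eating food.', 'A plane is landing.'], label=0.1),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)

# fit() attaches its own collate function and minimizes the MSE between
# the predicted cosine similarity and the gold similarity label.
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)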
Example #23
Source File: ddpg.py From pytorchrl with MIT License
def train_qf(self, expected_qval, obs_val, actions_val):
    """
    Given the mini-batch, fit the Q-value with L2 norm (defined in optimizer).

    Parameters
    ----------
    expected_qval (numpy.ndarray): expected q values in numpy array form.
    obs_val (numpy.ndarray): states draw from mini-batch, should have
        the same amount of rows as expected_qval.
    actions_val (numpy.ndarray): actions draw from mini-batch, should have
        the same amount of rows as expected_qval.
    """
    # Create Variable for input and output, we do not need gradient
    # of loss with respect to these variables
    obs = Variable(torch.from_numpy(obs_val)).type(
        torch.FloatTensor)
    actions = Variable(torch.from_numpy(actions_val)).type(
        torch.FloatTensor)
    expected_q = Variable(torch.from_numpy(expected_qval)).type(
        torch.FloatTensor)

    q_vals = self.qf(obs, actions)

    # Define loss function
    loss_fn = nn.MSELoss()
    loss = loss_fn(q_vals, expected_q)

    # Backpropagation and gradient descent
    self.qf_optimizer.zero_grad()
    loss.backward()
    self.qf_optimizer.step()

    return loss.data.numpy()
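This snippet uses the pre-0.4 Variable API and loss.data; on current PyTorch the same Q-function update can be written directly on tensors. A hedged modern sketch (not the project's code; qf and qf_optimizer stand in for the agent's attributes):

import torch
import torch.nn as nn

def train_qf_modern(qf, qf_optimizer, expected_qval, obs_val, actions_val):
    # Tensors carry autograd directly; Variable wrappers are no longer needed.
    obs = torch.as_tensor(obs_val, dtype=torch.float32)
    actions = torch.as_tensor(actions_val, dtype=torch.float32)
    expected_q = torch.as_tensor(expected_qval, dtype=torch.float32)

    loss = nn.MSELoss()(qf(obs, actions), expected_q)

    qf_optimizer.zero_grad()
    loss.backward()
    qf_optimizer.step()
    return loss.item()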
Example #24
Source File: losses.py From pase with MIT License
def __init__(self, z_gen=torch.randn, loss='L2'):
    self.z_gen = z_gen
    self.loss = loss
    if loss == 'L2':
        self.criterion = nn.MSELoss()
    elif loss == 'BCE':
        self.criterion = nn.BCEWithLogitsLoss()
    else:
        raise ValueError('Unrecognized loss ', loss)
Example #25
Source File: losses.py From pase with MIT License
def __init__(self, z_gen=torch.randn, batch_acum=1,
             grad_reverse=False, loss='L2'):
    self.z_gen = z_gen
    self.batch_acum = batch_acum
    self.grad_reverse = grad_reverse
    self.loss = loss
    if loss == 'L2':
        self.criterion = nn.MSELoss()
    elif loss == 'BCE':
        self.criterion = nn.BCEWithLogitsLoss()
    else:
        raise ValueError('Unrecognized loss ', loss)
Example #26
Source File: off_policy_algo.py From cerl with Apache License 2.0
def __init__(self, wwid, algo_name, state_dim, action_dim, actor_lr, critic_lr, gamma, tau, init_w=True):
    self.algo_name = algo_name
    self.gamma = gamma
    self.tau = tau

    # Initialize actors
    self.actor = Actor(state_dim, action_dim, wwid)
    if init_w:
        self.actor.apply(utils.init_weights)
    self.actor_target = Actor(state_dim, action_dim, wwid)
    utils.hard_update(self.actor_target, self.actor)
    self.actor_optim = Adam(self.actor.parameters(), actor_lr)

    self.critic = Critic(state_dim, action_dim)
    if init_w:
        self.critic.apply(utils.init_weights)
    self.critic_target = Critic(state_dim, action_dim)
    utils.hard_update(self.critic_target, self.critic)
    self.critic_optim = Adam(self.critic.parameters(), critic_lr)

    self.loss = nn.MSELoss()

    self.actor_target.cuda()
    self.critic_target.cuda()
    self.actor.cuda()
    self.critic.cuda()
    self.num_critic_updates = 0

    # Statistics Tracker
    self.action_loss = {'min': [], 'max': [], 'mean': [], 'std': []}
    self.policy_loss = {'min': [], 'max': [], 'mean': [], 'std': []}
    self.critic_loss = {'mean': []}
    self.q = {'min': [], 'max': [], 'mean': [], 'std': []}
    self.val = {'min': [], 'max': [], 'mean': [], 'std': []}
Example #27
Source File: loss_functions.py From Talking-Face-Generation-DAVS with MIT License
def __init__(self, opt=opt):
    super(L2SoftmaxLoss, self).__init__()
    self.softmax = nn.Softmax()
    self.L2loss = nn.MSELoss()
    self.label = None
Example #28
Source File: loss_functions.py From Talking-Face-Generation-DAVS with MIT License
def __init__(self, use_lsgan=False, target_real_label=1.0, target_fake_label=0.0,
             tensor=torch.FloatTensor, softlabel=False):
    super(GANLoss, self).__init__()
    self.real_label = target_real_label
    self.fake_label = target_fake_label
    self.real_label_var = None
    self.fake_label_var = None
    self.Tensor = tensor
    self.softlabel = softlabel
    if use_lsgan:
        self.loss = nn.MSELoss()
    else:
        self.loss = nn.BCELoss()
Example #29
Source File: modeling_bert.py From BERT-Relation-Extraction with Apache License 2.0
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
            position_ids=None, head_mask=None, inputs_embeds=None, labels=None):
    outputs = self.bert(input_ids,
                        attention_mask=attention_mask,
                        token_type_ids=token_type_ids,
                        position_ids=position_ids,
                        head_mask=head_mask,
                        inputs_embeds=inputs_embeds)
    pooled_output = outputs[1]

    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)

    outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here

    if labels is not None:
        if self.num_labels == 1:
            # We are doing regression
            loss_fct = MSELoss()
            loss = loss_fct(logits.view(-1), labels.view(-1))
        else:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        outputs = (loss,) + outputs

    return outputs  # (loss), logits, (hidden_states), (attentions)
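In the regression branch (num_labels == 1), both logits and labels are flattened to shape (batch_size,) before MSELoss is applied. A tiny standalone check of that shape handling, outside the model (illustrative only):

import torch
from torch.nn import MSELoss

batch_size, num_labels = 4, 1
logits = torch.randn(batch_size, num_labels)      # (4, 1) output of the classifier head
labels = torch.tensor([0.2, 1.5, -0.3, 0.8])      # float regression targets, shape (4,)

loss = MSELoss()(logits.view(-1), labels.view(-1))  # both flattened to shape (4,)
print(loss.item())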
Example #30
Source File: no_action_feedforward_predictor.py From atari-representation-learning with MIT License
def __init__(self, encoder, config, device=torch.device('cpu'), wandb=None):
    super().__init__(encoder, wandb, device)
    self.config = config
    self.patience = self.config["patience"]
    self.fc_size = self.config["naff_fc_size"]
    self.pred_offset = self.config["pred_offset"]
    self.naff = NaFFPredictor(encoder, self.fc_size).to(device)
    self.epochs = config['epochs']
    self.batch_size = config['batch_size']
    self.device = device
    self.optimizer = torch.optim.Adam(list(self.naff.parameters()),
                                      lr=config['lr'], eps=1e-5)
    self.loss_fn = nn.MSELoss()
    self.early_stopper = EarlyStopping(patience=self.patience, verbose=False,
                                       wandb=self.wandb, name="encoder")