Python chainer.functions.mean_squared_error() Examples
The following are 30 code examples of chainer.functions.mean_squared_error(). You can go to the original project or source file by following the source references above each example. You may also want to check out all available functions/classes of the module chainer.functions, or try the search function.
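Before the examples, here is a minimal, self-contained sketch of what the function computes: the mean of the squared element-wise differences over all elements, returned as a scalar chainer.Variable. The input values below are made up purely for illustration.

import numpy as np
import chainer.functions as F

# mean_squared_error averages the squared element-wise differences
# over *all* elements of the two inputs (their shapes must match).
x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
t = np.array([[1.5, 2.0], [2.0, 4.5]], dtype=np.float32)

loss = F.mean_squared_error(x, t)
print(float(loss.array))  # (0.25 + 0.0 + 1.0 + 0.25) / 4 = 0.375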
Example #1
Source File: linear_regression.py From dockerfiles with Apache License 2.0
def linear_train(train_data, train_target, n_epochs=200):
    for _ in range(n_epochs):
        # Get the result of the forward pass.
        output = linear_forward(train_data)

        # Calculate the loss between the training data and target data.
        loss = F.mean_squared_error(train_target, output)

        # Zero all gradients before updating them.
        linear_function.zerograds()

        # Calculate and update all gradients.
        loss.backward()

        # Use the optimizer to move all parameters of the network
        # to values which will reduce the loss.
        optimizer.update()
Example #2
Source File: fcn8s_matting.py From portrait_matting with GNU General Public License v3.0
def __call__(self, x, t=None, w=None):
    # t, w is on host.

    # Forward network
    alpha = self.forward(x)

    if t is None:
        assert not chainer.config.train
        return

    # Weighted mean squared error
    # TODO: Do more tests
    # loss = F.mean(F.squared_error(alpha, t) * w)
    loss = F.mean_squared_error(alpha, t)
    if np.isnan(float(loss.data)):
        raise ValueError('Loss is nan.')
    chainer.report({'loss': loss}, self)

    return loss
Example #3
Source File: dqn_agent_nips.py From DQN-chainer with MIT License
def forward(self, state, action, Reward, state_dash, episode_end):
    num_of_batch = state.shape[0]
    s = Variable(state)
    s_dash = Variable(state_dash)

    Q = self.Q_func(s)  # Get Q-value

    # Generate Target Signals
    max_Q_dash_ = self.Q_func(s_dash)
    tmp = list(map(np.max, max_Q_dash_.data.get()))
    max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
    target = np.asanyarray(Q.data.get(), dtype=np.float32)

    for i in xrange(num_of_batch):
        if not episode_end[i][0]:
            tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
        else:
            tmp_ = np.sign(Reward[i])

        target[i, self.action_to_index(action[i])] = tmp_

    loss = F.mean_squared_error(Variable(cuda.to_gpu(target)), Q)
    return loss, Q
Example #4
Source File: basic_cnn_tail.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    #x = Variable(x_data)  # x_data.astype(np.float32)
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #5
Source File: plot_chainer_MLP.py From soft-dtw with BSD 2-Clause "Simplified" License
def __call__(self, x, t):
    y = self.predictor(x)

    if self.loss == "euclidean":
        return F.mean_squared_error(y, t)

    elif self.loss == "sdtw":
        loss = 0
        for i in range(y.shape[0]):
            y_i = F.reshape(y[i], (-1, 1))
            t_i = F.reshape(t[i], (-1, 1))
            loss += SoftDTWLoss(self.gamma)(y_i, t_i)
        return loss

    else:
        raise ValueError("Unknown loss")
Example #6
Source File: cnn.py From fpl with MIT License
def __call__(self, inputs):
    pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
    batch_size, past_len, _ = pos_x.shape

    h_pos = self.pos_encoder(pos_x)
    h_pose = self.pose_encoder(pose_x)
    h_ego = self.ego_encoder(ego_x)
    h = F.concat((h_pos, h_pose, h_ego), axis=1)  # (B, C, 2)
    h = self.inter(h)
    h_pos = self.pos_decoder(h)
    pred_y = self.last(h_pos)  # (B, 10, C+6+28)
    pred_y = F.swapaxes(pred_y, 1, 2)
    pred_y = pred_y[:, :pos_y.shape[1], :]
    loss = F.mean_squared_error(pred_y, pos_y)
    pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
    pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
    return loss, pred_y, None
Example #7
Source File: cnn.py From fpl with MIT License
def __call__(self, inputs):
    pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
    batch_size, past_len, _ = pos_x.shape

    h_pos = self.pos_encoder(pos_x)
    h_pose = self.pose_encoder(pose_x)
    h = F.concat((h_pos, h_pose), axis=1)  # (B, C, 2)
    h = self.inter(h)
    h_pos = self.pos_decoder(h)
    pred_y = self.last(h_pos)  # (B, 10, C+6+28)
    pred_y = F.swapaxes(pred_y, 1, 2)
    pred_y = pred_y[:, :pos_y.shape[1], :]
    loss = F.mean_squared_error(pred_y, pos_y)
    pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
    pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
    return loss, pred_y, None
Example #8
Source File: cnn.py From fpl with MIT License
def __call__(self, inputs):
    pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
    batch_size, past_len, _ = pos_x.shape

    h_pos = self.pos_encoder(pos_x)
    h_ego = self.ego_encoder(ego_x)
    h = F.concat((h_pos, h_ego), axis=1)  # (B, C, 2)
    h = self.inter(h)
    h_pos = self.pos_decoder(h)
    pred_y = self.last(h_pos)  # (B, 10, C+6+28)
    pred_y = F.swapaxes(pred_y, 1, 2)
    pred_y = pred_y[:, :pos_y.shape[1], :]
    loss = F.mean_squared_error(pred_y, pos_y)
    pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
    pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
    return loss, pred_y, None
Example #9
Source File: trpo.py From chainerrl with MIT License
def _update_vf(self, dataset):
    """Update the value function using a given dataset.

    The value function is updated via SGD to minimize TD(lambda) errors.
    """
    xp = self.vf.xp
    assert 'state' in dataset[0]
    assert 'v_teacher' in dataset[0]

    dataset_iter = chainer.iterators.SerialIterator(
        dataset, self.vf_batch_size)

    while dataset_iter.epoch < self.vf_epochs:
        batch = dataset_iter.__next__()
        states = batch_states([b['state'] for b in batch], xp, self.phi)
        if self.obs_normalizer:
            states = self.obs_normalizer(states, update=False)
        vs_teacher = xp.array(
            [b['v_teacher'] for b in batch], dtype=xp.float32)
        vs_pred = self.vf(states)
        vf_loss = F.mean_squared_error(vs_pred, vs_teacher[..., None])
        self.vf_optimizer.update(lambda: vf_loss)
Example #10
Source File: basic_cnn_head.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    h = F.leaky_relu(self.conv3(h), slope=0.1)
    h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.leaky_relu(self.conv5(h), slope=0.1)
    h = F.leaky_relu(self.conv6(h), slope=0.1)
    h = F.clipped_relu(self.conv7(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #11
Source File: tgan_updater_lsgan.py From tgan with MIT License
def update_core(self):
    xp = self.fsgen.xp
    fsgen_optimizer = self.get_optimizer('fsgen')
    vgen_optimizer = self.get_optimizer('vgen')
    vdis_optimizer = self.get_optimizer('vdis')

    real_video, fake_video, dis_fake, dis_real = self.forward()

    # LSGAN least-squares targets: the discriminator is pushed toward
    # -1 on fake videos and 1 on real ones; the generator pushes
    # the discriminator's output on fakes toward 0.
    loss_dis_fake = F.mean_squared_error(dis_fake, xp.ones_like(dis_fake.data) * -1)
    loss_dis_real = F.mean_squared_error(dis_real, xp.ones_like(dis_real.data))
    loss_dis = loss_dis_fake + loss_dis_real
    loss_fsgen = F.mean_squared_error(dis_fake, xp.zeros_like(dis_fake.data))

    chainer.report({'loss_dis': loss_dis}, self.vdis)
    chainer.report({'loss_gen': loss_fsgen}, self.vdis)

    fsgen_optimizer.target.zerograds()
    vgen_optimizer.target.zerograds()
    vdis_optimizer.target.zerograds()

    loss_fsgen.backward()
    fsgen_optimizer.update()
    fake_video.unchain_backward()
    vgen_optimizer.update()
    loss_dis.backward()
    vdis_optimizer.update()

    self.it += 1
Example #12
Source File: ppo.py From chainerrl with MIT License
def _lossfun(self,
             entropy, vs_pred, log_probs,
             vs_pred_old, log_probs_old,
             advs, vs_teacher):

    prob_ratio = F.exp(log_probs - log_probs_old)

    loss_policy = - F.mean(F.minimum(
        prob_ratio * advs,
        F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

    if self.clip_eps_vf is None:
        loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
    else:
        loss_value_func = F.mean(F.maximum(
            F.square(vs_pred - vs_teacher),
            F.square(_elementwise_clip(vs_pred,
                                       vs_pred_old - self.clip_eps_vf,
                                       vs_pred_old + self.clip_eps_vf)
                     - vs_teacher)
        ))

    loss_entropy = -F.mean(entropy)

    self.value_loss_record.append(float(loss_value_func.array))
    self.policy_loss_record.append(float(loss_policy.array))

    loss = (
        loss_policy
        + self.value_func_coef * loss_value_func
        + self.entropy_coef * loss_entropy
    )
    return loss
Example #13
Source File: dqn_agent.py From DQN-chainer with MIT License
def get_loss(self, state, action, reward, state_prime, episode_end):
    s = Variable(cuda.to_gpu(state))
    s_dash = Variable(cuda.to_gpu(state_prime))

    q = self.model.q_function(s)  # Get Q-value

    # Generate Target Signals
    tmp = self.model_target.q_function(s_dash)  # Q(s',*)
    tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)
    max_q_prime = np.asanyarray(tmp, dtype=np.float32)
    target = np.asanyarray(copy.deepcopy(q.data.get()), dtype=np.float32)

    for i in range(self.replay_size):
        if episode_end[i][0] is True:
            tmp_ = np.sign(reward[i])
        else:
            # The sign of reward is used as the reward of DQN!
            tmp_ = np.sign(reward[i]) + self.gamma * max_q_prime[i]

        target[i, action[i]] = tmp_

    # TD-error clipping
    td = Variable(cuda.to_gpu(target)) - q  # TD error
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)

    zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.n_act),
                                             dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zero_val)
    return loss, q
Example #14
Source File: dqn_agent_cpu.py From DQN-chainer with MIT License
def get_loss(self, state, action, reward, state_prime, episode_end):
    s = Variable(state)
    s_dash = Variable(state_prime)

    q = self.model.q_function(s)  # Get Q-value

    # Generate Target Signals
    tmp = self.model_target.q_function(s_dash)  # Q(s',*)
    tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)
    max_q_prime = np.asanyarray(tmp, dtype=np.float32)
    target = np.asanyarray(copy.deepcopy(q.data), dtype=np.float32)

    for i in range(self.replay_size):
        if episode_end[i][0] is True:
            tmp_ = np.sign(reward[i])
        else:
            # The sign of reward is used as the reward of DQN!
            tmp_ = np.sign(reward[i]) + self.gamma * max_q_prime[i]

        target[i, action[i]] = tmp_
        #print(tmp_)

    #print(target)

    # TD-error clipping
    td = Variable(target) - q  # TD error
    #print("TD ")
    #print(td.data)
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)
    #print(np.round(td.data))

    zero_val = Variable(np.zeros((self.replay_size, self.n_act), dtype=np.float32))
    loss = F.mean_squared_error(td_clip, zero_val)
    return loss, q
Example #15
Source File: rnin.py From deel with MIT License
def getLossDistill(self, x, t):
    self.loss = F.mean_squared_error(x, t)
    return self.loss
Example #16
Source File: dqn_agent_nature.py From DQN-chainer with MIT License
def forward(self, state, action, Reward, state_dash, episode_end):
    num_of_batch = state.shape[0]
    s = Variable(state)
    s_dash = Variable(state_dash)

    Q = self.Q_func(s)  # Get Q-value

    # Generate Target Signals
    tmp = self.Q_func_target(s_dash)  # Q(s',*)
    tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
    max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
    target = np.asanyarray(Q.data.get(), dtype=np.float32)

    for i in xrange(num_of_batch):
        if not episode_end[i][0]:
            tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
        else:
            tmp_ = np.sign(Reward[i])

        action_index = self.action_to_index(action[i])
        target[i, action_index] = tmp_

    # TD-error clipping
    td = Variable(cuda.to_gpu(target)) - Q  # TD error
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)

    zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions),
                                             dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zero_val)
    return loss, Q
Example #17
Source File: nin.py From deel with MIT License
def getLossDistill(self, x, t):
    _t = chainer.Variable(t.data, volatile='off')
    self.loss = F.mean_squared_error(x, _t)
    return self.loss
Example #18
Source File: basic_cnn_tail.py From SeRanet with MIT License
def clear(self):
    self.loss = None
    # self.accuracy = None

# def forward(self, x, t):
#     self.clear()
#     #x = chainer.Variable(x_data)  # x_data.astype(np.float32)
#     #t = chainer.Variable(t_data)  # [Note]: x_data, t_data must be np.float32 type
#
#     #self.loss = F.huber_loss(h, t, delta= 1 / 255.)
#     self.loss = F.mean_squared_error(self(x), t)
#     # self.accuracy = F.accuracy(h, t)  # type incompatible
#     return self.loss
Example #19
Source File: basic_cnn_small.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h = F.leaky_relu(self.conv1(x), slope=0.1)
    h = F.leaky_relu(self.conv2(h), slope=0.1)
    #h = F.leaky_relu(self.conv3(h), slope=0.1)
    #h = F.leaky_relu(self.conv4(h), slope=0.1)
    h = F.clipped_relu(self.conv3(h), z=1.0)
    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #20
Source File: seranet_v1.py From SeRanet with MIT License
def __call__(self, x, t=None):
    self.clear()
    h1 = F.leaky_relu(self.conv1(x), slope=0.1)
    h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
    h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

    h2 = self.seranet_v1_crbm(x)

    # Fusion
    h12 = F.concat((h1, h2), axis=1)

    lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
    lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
    lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
    ru = F.leaky_relu(self.convru6(h12), slope=0.1)
    ru = F.leaky_relu(self.convru7(ru), slope=0.1)
    ru = F.leaky_relu(self.convru8(ru), slope=0.1)
    ld = F.leaky_relu(self.convld6(h12), slope=0.1)
    ld = F.leaky_relu(self.convld7(ld), slope=0.1)
    ld = F.leaky_relu(self.convld8(ld), slope=0.1)
    rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
    rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
    rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

    # Splice
    h = CF.splice(lu, ru, ld, rd)

    h = F.leaky_relu(self.conv9(h), slope=0.1)
    h = F.leaky_relu(self.conv10(h), slope=0.1)
    h = F.leaky_relu(self.conv11(h), slope=0.1)
    h = F.clipped_relu(self.conv12(h), z=1.0)

    if self.train:
        self.loss = F.mean_squared_error(h, t)
        return self.loss
    else:
        return h
Example #21
Source File: darknet19.py From YOLOv2 with MIT License
def __call__(self, x, t):
    y = self.predictor(x)
    if t.ndim == 2:  # use squared error when label is one hot label
        y = F.softmax(y)
        # loss = F.mean_squared_error(y, t)
        loss = sum_of_squared_error(y, t)
        accuracy = F.accuracy(y, t.data.argmax(axis=1).astype(np.int32))
    else:  # use softmax cross entropy when label is normal label
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
    return y, loss, accuracy
Example #22
Source File: train_qm9.py From chainer-chemistry with MIT License
def rmse(x0, x1):
    return F.sqrt(F.mean_squared_error(x0, x1))
Example #23
Source File: train_own_dataset.py From chainer-chemistry with MIT License
def rmse(x0, x1):
    return F.sqrt(F.mean_squared_error(x0, x1))
Example #24
Source File: autoencoder.py From chainer-libDNN with MIT License
def __init__(self, model, gpu=-1):
    NNBase.__init__(self, model, gpu)

    self.optimizer = Opt.Adam()
    self.optimizer.setup(self.model)

    self.loss_function = F.mean_squared_error
    self.loss_param = {}
Example #25
Source File: __init__.py From deel with MIT License
def backprop(self, t, x=None):
    if x is None:
        x = Tensor.context

    #loss = F.mean_squared_error(x.content, t.content)
    loss = F.softmax_cross_entropy(x.content, t.content)
    if Deel.train:
        loss.backward()
    accuracy = F.accuracy(x.content, t.content)
    self.optimizer.update()
    return loss.data, accuracy.data
Example #26
Source File: behavioral_cloning.py From baselines with MIT License
def _loss(self, batch_obs, batch_acs):
    out = self.model(batch_obs)
    entropy = F.average(out.entropy)
    if self.action_wrapper == 'discrete':
        loss = F.softmax_cross_entropy(out.params[0], batch_acs.reshape(-1))
    elif self.action_wrapper == 'continuous':
        loss = F.mean_squared_error(out.params[0], batch_acs)
    elif self.action_wrapper == 'multi-dimensional-softmax':
        loss = 0
        for idx, logit in enumerate(out.params):
            expected = batch_acs[:, idx]
            loss += F.softmax_cross_entropy(logit, expected)
    loss -= entropy * self.entropy_coef
    return loss
Example #27
Source File: NNet.py From alpha-zero-general with MIT License
def loss_v(self, targets, outputs):
    return F.mean_squared_error(targets[:, None], outputs)
Example #28
Source File: task.py From cloud-ml-sdk with Apache License 2.0
def main():
    # Define train function
    def linear_train(train_data, train_target, n_epochs=200):
        for _ in range(n_epochs):
            output = linear_function(train_data)
            loss = F.mean_squared_error(train_target, output)
            linear_function.zerograds()
            loss.backward()
            optimizer.update()

    # Construct train data
    x = 30 * np.random.rand(1000).astype(np.float32)
    y = 7 * x + 10
    y += 10 * np.random.randn(1000).astype(np.float32)

    linear_function = L.Linear(1, 1)
    x_var = Variable(x.reshape(1000, -1))
    y_var = Variable(y.reshape(1000, -1))
    optimizer = optimizers.MomentumSGD(lr=0.001)
    optimizer.setup(linear_function)

    for i in range(150):
        linear_train(x_var, y_var, n_epochs=20)
    y_pred = linear_function(x_var).data

    slope = linear_function.W.data[0, 0]
    intercept = linear_function.b.data[0]
    print("Final Line: {0:.3}x + {1:.3}".format(slope, intercept))
Example #29
Source File: evaluator.py From 3dpose_gan with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    gen = self._targets['gen']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            xy_proj, xyz, scale = self.converter(batch, self.device)
            xy_proj, xyz = xy_proj[:, 0], xyz[:, 0]
            with function.no_backprop_mode(), \
                    chainer.using_config('train', False):
                xy_real = chainer.Variable(xy_proj)
                z_pred = gen(xy_real)

                z_mse = F.mean_squared_error(z_pred, xyz[:, 2::3])
                chainer.report({'z_mse': z_mse}, gen)

                lx = gen.xp.power(xyz[:, 0::3] - xy_proj[:, 0::2], 2)
                ly = gen.xp.power(xyz[:, 1::3] - xy_proj[:, 1::2], 2)
                lz = gen.xp.power(xyz[:, 2::3] - z_pred.data, 2)

                euclidean_distance = gen.xp.sqrt(lx + ly + lz).mean(axis=1)
                euclidean_distance *= scale[:, 0]
                euclidean_distance = gen.xp.mean(euclidean_distance)

                chainer.report(
                    {'euclidean_distance': euclidean_distance}, gen)
        summary.add(observation)

    return summary.compute_mean()
Example #30
Source File: test_n_step_rnn.py From chainer with MIT License
def __call__(self, xs, ts):
    h1 = [self.l1(x) for x in xs]
    # MultiNodeNStepRNN returns outputs of actual_rnn + delegate_variable.
    cell1, cell2, os, delegate_variable = self.rnn(h1)
    os = F.concat(os, axis=0)
    h2 = self.l2(os)
    h3 = self.l3(h2)
    ys = F.sum(h3, axis=0)
    err = F.mean_squared_error(ys, ts)
    err, = chainermn.functions.pseudo_connect(delegate_variable, err)
    return err