Python tensorflow.assign_sub() Examples

The following are 24 code examples of tensorflow.assign_sub(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module tensorflow.
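As quick orientation: in TensorFlow 1.x, tf.assign_sub(ref, value) subtracts value from the variable ref in place and returns a tensor carrying the updated value. A minimal sketch (the variable name and numbers are illustrative):

import tensorflow as tf

counter = tf.Variable(10.0)
decremented = tf.assign_sub(counter, 3.0)  # counter -= 3.0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(decremented))  # 7.0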
Example #1
Source File: optimizers.py    From zhusuan with MIT License
def _apply_dense(self, grad, var):
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            # Can't use 1e-8 due to underflow
            eps = 1e-7
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = tf.assign_sub(var, lr_t * g_t)
        return tf.group(*[var_update, m_t, v_t]) 
Example #2
Source File: ops.py    From pix2vox with GNU General Public License v3.0
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
        pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)

        if pop_mean not in tf.moving_average_variables():
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)

        def func1():
            # Execute at training time
            batch_mean, batch_var = tf.nn.moments(x, list(range(len(shape) - 1)))  # reduce over all axes except the channel axis
            update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
            update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
            with tf.control_dependencies([update_mean, update_var]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

        def func2():
            # Execute at test time
            return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

        return tf.cond(train, func1, func2) 
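A usage note: because `train` is consumed by tf.cond, it must be a scalar boolean tensor rather than a Python bool. A minimal sketch, assuming `x` is an activation tensor that is already defined:

is_training = tf.placeholder(tf.bool, shape=[], name='is_training')
h = batch_norm(x, train=is_training, name='bn1')
# feed is_training=True during training and False at evaluation time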
Example #3
Source File: graph.py    From 3D-point-cloud-generation with MIT License
def batchNormalization(opt,input,type):
	with tf.variable_scope("batchNorm"):
		globalMean = tf.get_variable("mean",shape=[input.shape[-1]],dtype=tf.float32,trainable=False,
											initializer=tf.constant_initializer(0.0))
		globalVar = tf.get_variable("var",shape=[input.shape[-1]],dtype=tf.float32,trainable=False,
										  initializer=tf.constant_initializer(1.0))
		if opt.training:
			if type=="conv": batchMean,batchVar = tf.nn.moments(input,axes=[0,1,2])
			elif type=="fc": batchMean,batchVar = tf.nn.moments(input,axes=[0])
			trainMean = tf.assign_sub(globalMean,(1-opt.BNdecay)*(globalMean-batchMean))
			trainVar = tf.assign_sub(globalVar,(1-opt.BNdecay)*(globalVar-batchVar))
			with tf.control_dependencies([trainMean,trainVar]):
				output = tf.nn.batch_normalization(input,batchMean,batchVar,None,None,opt.BNepsilon)
		else: output = tf.nn.batch_normalization(input,globalMean,globalVar,None,None,opt.BNepsilon)
	return output

Example #4
Source File: cdk.py    From xRBM with MIT License
def apply_updates(self, model, grads):
        """
        Updates the model parameters based on the given gradients, using momentum
        """
        update_ops = []
        mom_ops = []
        
        if isinstance(self._learning_rate, list):
            lrs = self._learning_rate
        else:
            lrs = [self._learning_rate for p in model.model_params]

        with tf.name_scope('CDLearning/updates'):
            for param, grad, mv, lr in zip(model.model_params, grads, self._momentum_vector, lrs):
                mv = tf.assign(mv, self._momentum * mv + grad * lr)
                update_ops.append(tf.assign_sub(param, mv))
                
        return update_ops, mom_ops 
Example #5
Source File: sg_optimize.py    From sugartensor with MIT License
def _apply_dense(self, grad, var):
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = tf.assign_sub(var, lr_t * g_t)
        return tf.group(*[var_update, m_t, v_t]) 
Example #6
Source File: ops.py    From voxel-dcgan with MIT License
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
        pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)

        if pop_mean not in tf.moving_average_variables():
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)

        def func1():
            # execute at training time
            batch_mean, batch_var = tf.nn.moments(x, list(range(len(shape) - 1)))  # reduce over all axes except the channel axis
            update_mean = tf.assign_sub(pop_mean, (1 - decay)*(pop_mean - batch_mean))
            update_var = tf.assign_sub(pop_var, (1 - decay)*(pop_var - batch_var))
            with tf.control_dependencies([update_mean, update_var]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

        def func2():
            # execute at test time
            return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

        return tf.cond(train, func1, func2) 
Example #7
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    # Arguments
        x: A `Variable`.
        decrement: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated.
    """
    return tf.assign_sub(x, decrement) 
Example #8
Source File: tf.py    From rafiki with Apache License 2.0
def _make_train_op(self, item_logits, item_idxs, scores):
        batch_size = self._batch_size
        base_decay = 0.99
        learning_rate = 0.0035
        adam_beta1 = 0
        adam_epsilon = 1e-3
        entropy_weight = 0.0001

        # Compute log probs & entropy
        sample_log_probs = self._compute_sample_log_probs(item_idxs, item_logits)
        sample_entropy = self._compute_sample_entropy(item_logits)

        # Compute rewards in a batch
        # Adding entropy encourages exploration
        rewards = scores
        rewards += entropy_weight * sample_entropy

        # Baseline reward for REINFORCE
        reward_base = tf.Variable(0., name='reward_base', dtype=tf.float32, trainable=False)

        # Update baseline whenever reward updates
        base_update = tf.assign_sub(reward_base, (1 - base_decay) * (reward_base - tf.reduce_mean(rewards)))
        with tf.control_dependencies([base_update]):
            rewards = tf.identity(rewards)

        # Compute losses in a batch
        losses = sample_log_probs * (rewards - reward_base)

        # Add optimizer
        tf_vars = self._get_all_variables()
        steps = tf.Variable(0, name='steps', dtype=tf.int32, trainable=False)
        grads = tf.gradients(losses, tf_vars)
        grads = [x / tf.constant(batch_size, dtype=tf.float32) for x in grads] # Average all gradients
        opt = tf.train.AdamOptimizer(learning_rate, beta1=adam_beta1, epsilon=adam_epsilon,
                                    use_locking=True)
        train_op = opt.apply_gradients(zip(grads, tf_vars), global_step=steps)
        
        return (train_op, losses, rewards) 
Example #9
Source File: tensorflow_backend.py    From keras-lambda with MIT License
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    # Arguments
        x: A `Variable`.
        decrement: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated.
    """
    return tf.assign_sub(x, decrement) 
Example #10
Source File: metric.py    From VAE-GAN with MIT License
def _assign_moving_average(variable, value, decay):
    with tf.name_scope(None, 'AssignMovingAvg', [variable, value, decay]) as scope:
        decay = tf.convert_to_tensor(decay, name='decay', dtype=tf.float32)
        update_delta = (variable - value) * decay
        return tf.assign_sub(variable, update_delta, name=scope) 
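Note the convention: the update above computes variable <- variable - decay * (variable - value), i.e. (1 - decay) * variable + decay * value, so a larger decay means faster tracking (the opposite of tf.train.ExponentialMovingAverage, where a decay close to 1 means slow tracking). A quick numeric sketch (names and values illustrative):

ema = tf.Variable(0.0)
update = _assign_moving_average(ema, value=1.0, decay=0.1)
# each sess.run(update) closes 10% of the remaining gap toward 1.0:
# 0.0 -> 0.1 -> 0.19 -> 0.271 -> ...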
Example #11
Source File: l2_attack.py    From Image-Captioning-Attack with Apache License 2.0
def adam_optimizer_tf(self, loss, var):
        with tf.name_scope("adam_optimier"):
            self.grad = tf.gradients(loss, var)[0]
            self.grad_norm = tf.norm(self.grad)
            # self.noise = tf.random_normal(self.grad.shape, 0.0, 1.0)
            self.noise = 0
            self.beta1 = tf.constant(0.9)
            self.beta2 = tf.constant(0.999)
            self.lr = tf.constant(self.LEARNING_RATE)
            self.epsilon = 1e-8
            self.epoch = tf.Variable(1, dtype=tf.float32)  # start at 1: the bias correction below is 0/0 when epoch == 0
            self.mt = tf.Variable(np.zeros(var.shape), dtype=tf.float32)
            self.vt = tf.Variable(np.zeros(var.shape), dtype=tf.float32)

            new_mt = self.beta1 * self.mt + (1 - self.beta1) * self.grad
            new_vt = self.beta2 * self.vt + (1 - self.beta2) * tf.square(self.grad)
            corr = (tf.sqrt(1 - tf.pow(self.beta2, self.epoch))) / (1 - tf.pow(self.beta1, self.epoch))
            # delta = self.lr * corr * (new_mt / (tf.sqrt(new_vt) + self.epsilon))
            delta = self.lr * corr * ((new_mt / tf.sqrt(new_vt + self.epsilon)) + self.noise / tf.sqrt(self.epoch + 1))
            # delta = self.lr * (self.grad + self.noise)
            
            self.new_var = var - delta
            self.updated_newimg = tf.tanh(self.new_var + self.timg)
            assign_var = tf.assign_sub(var, delta)
            assign_mt = tf.assign(self.mt, new_mt)
            assign_vt = tf.assign(self.vt, new_vt)
            assign_epoch = tf.assign_add(self.epoch, 1)
            return tf.group(assign_var, assign_mt, assign_vt, assign_epoch) 
Example #12
Source File: micro_controller.py    From NAS-Benchmark with GNU General Public License v3.0
def build_trainer(self, child_model):
    child_model.build_valid_rl()
    self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) /
                      tf.to_float(child_model.batch_size))
    self.reward = self.valid_acc

    if self.entropy_weight is not None:
      self.reward += self.entropy_weight * self.sample_entropy

    self.sample_log_prob = tf.reduce_sum(self.sample_log_prob)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    baseline_update = tf.assign_sub(
      self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))

    with tf.control_dependencies([baseline_update]):
      self.reward = tf.identity(self.reward)
    self.loss = self.sample_log_prob * (self.reward - self.baseline)
    self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step")

    tf_variables = [var for var in tf.trainable_variables() if var.name.startswith(self.name)]
    print("-" * 80)
    for var in tf_variables:
      print(var)

    self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
      self.loss,
      tf_variables,
      self.train_step,
      clip_mode=self.clip_mode,
      grad_bound=self.grad_bound,
      l2_reg=self.l2_reg,
      lr_init=self.lr_init,
      lr_dec_start=self.lr_dec_start,
      lr_dec_every=self.lr_dec_every,
      lr_dec_rate=self.lr_dec_rate,
      optim_algo=self.optim_algo,
      sync_replicas=self.sync_replicas,
      num_aggregate=self.num_aggregate,
      num_replicas=self.num_replicas)

    self.skip_rate = tf.constant(0.0, dtype=tf.float32) 
Example #13
Source File: l2_attack.py    From ImageCaptioningAttack with Apache License 2.0
def adam_optimizer_tf(self, loss, var):
        with tf.name_scope("adam_optimier"):
            self.grad = tf.gradients(loss, var)[0]
            self.grad_norm = tf.norm(self.grad)
            # self.noise = tf.random_normal(self.grad.shape, 0.0, 1.0)
            self.noise = 0
            self.beta1 = tf.constant(0.9)
            self.beta2 = tf.constant(0.999)
            self.lr = tf.constant(self.LEARNING_RATE)
            self.epsilon = 1e-8
            self.epoch = tf.Variable(1, dtype=tf.float32)  # start at 1: the bias correction below is 0/0 when epoch == 0
            self.mt = tf.Variable(np.zeros(var.shape), dtype=tf.float32)
            self.vt = tf.Variable(np.zeros(var.shape), dtype=tf.float32)

            new_mt = self.beta1 * self.mt + (1 - self.beta1) * self.grad
            new_vt = self.beta2 * self.vt + (1 - self.beta2) * tf.square(self.grad)
            corr = (tf.sqrt(1 - tf.pow(self.beta2, self.epoch))) / (1 - tf.pow(self.beta1, self.epoch))
            # delta = self.lr * corr * (new_mt / (tf.sqrt(new_vt) + self.epsilon))
            delta = self.lr * corr * ((new_mt / tf.sqrt(new_vt + self.epsilon)) + self.noise / tf.sqrt(self.epoch + 1))
            # delta = self.lr * (self.grad + self.noise)
            
            self.new_var = var - delta
            self.updated_newimg = tf.tanh(self.new_var + self.timg)
            assign_var = tf.assign_sub(var, delta)
            assign_mt = tf.assign(self.mt, new_mt)
            assign_vt = tf.assign(self.vt, new_vt)
            assign_epoch = tf.assign_add(self.epoch, 1)
            return tf.group(assign_var, assign_mt, assign_vt, assign_epoch) 
Example #14
Source File: tensorflow_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    # Arguments
        x: A `Variable`.
        decrement: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated.
    """
    return tf.assign_sub(x, decrement) 
Example #15
Source File: optimizer.py    From higan with MIT License
def apply_gradients(self, grads_and_vars):
        with tf.name_scope(self.name):
            state_vars = []
            update_ops = []

            # Adjust learning rate to deal with startup bias.
            with tf.control_dependencies(None):
                b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
                b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
                state_vars += [b1pow_var, b2pow_var]
            b1pow_new = b1pow_var * self.beta1
            b2pow_new = b2pow_var * self.beta2
            update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)]
            lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new)

            # Construct ops to update each variable.
            for grad, var in grads_and_vars:
                with tf.control_dependencies(None):
                    m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
                    v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
                    state_vars += [m_var, v_var]
                m_new = self.beta1 * m_var + (1 - self.beta1) * grad
                v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad)
                var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon)
                update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)]

            # Group everything together.
            self.all_state_vars += state_vars
            return tf.group(*update_ops) 
Example #16
Source File: tensorflow_backend.py    From KerasNeuralFingerprint with MIT License
def update_sub(x, decrement):
    return tf.assign_sub(x, decrement) 
Example #17
Source File: optimizer.py    From Parser-v3 with Apache License 2.0
def minimize(self, loss, variables=None):
    """"""
    
    variables = variables or tf.trainable_variables()
    gradients = tf.gradients(loss, variables,
                             colocate_gradients_with_ops=True,
                             gate_gradients=True,
                             aggregation_method=2)
    gradients = {variable: gradient for variable, gradient in zip(variables, gradients) if gradient is not None}
    
    variable_steps = {}
    variable_indices = {}
    updates = [tf.assign_add(self.global_step, 1)]
    for variable, gradient in six.iteritems(gradients):
      if isinstance(gradient, tf.Tensor):
        step, update = self.dense_update(gradient, variable)
        variable_steps[variable] = step
        updates.extend(update)
      else:
        step, indices, update = self.sparse_update(gradient, variable)
        variable_steps[variable] = step
        variable_indices[variable] = indices
        updates.extend(update)
    
    variable_steps = self.clip_by_global_norm(variable_steps)
    
    for variable, step in six.iteritems(variable_steps):
      if variable in variable_indices:
        indices = variable_indices[variable]
        updates.append(tf.scatter_sub(variable, indices, step))
      else:
        updates.append(tf.assign_sub(variable, step))
    
    return tf.tuple(updates)[0]
  
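In the loop above, dense gradient steps go through tf.assign_sub, while sparse (IndexedSlices) steps update only the touched rows via tf.scatter_sub. A minimal sketch contrasting the two ops (shapes and values illustrative):

var = tf.Variable([[1., 1.], [2., 2.], [3., 3.]])
dense_step = tf.assign_sub(var, tf.fill([3, 2], 0.5))            # updates every row
sparse_step = tf.scatter_sub(var, [0, 2], [[.1, .1], [.2, .2]])  # updates rows 0 and 2 only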
Example #18
Source File: ptb_enas_controller.py    From D-VAE with MIT License
def build_trainer(self, child_model):
    # actor
    self.valid_loss = tf.to_float(child_model.rl_loss)
    self.valid_loss = tf.stop_gradient(self.valid_loss)
    self.valid_ppl = tf.exp(self.valid_loss)
    self.reward = 80.0 / self.valid_ppl

    if self.entropy_weight is not None:
      self.reward += self.entropy_weight * self.sample_entropy

    # or baseline
    self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    baseline_update = tf.assign_sub(
      self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))

    with tf.control_dependencies([baseline_update]):
      self.reward = tf.identity(self.reward)
    self.loss = self.sample_log_probs * (self.reward - self.baseline)

    self.train_step = tf.Variable(
        0, dtype=tf.int32, trainable=False, name="train_step")
    tf_variables = [var
        for var in tf.trainable_variables() if var.name.startswith(self.name)]

    self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
      self.loss,
      tf_variables,
      self.train_step,
      clip_mode=self.clip_mode,
      grad_bound=self.grad_bound,
      l2_reg=self.l2_reg,
      lr_init=self.lr_init,
      lr_dec_start=self.lr_dec_start,
      lr_dec_every=self.lr_dec_every,
      lr_dec_rate=self.lr_dec_rate,
      optim_algo=self.optim_algo,
      sync_replicas=self.sync_replicas,
      num_aggregate=self.num_aggregate,
      num_replicas=self.num_replicas) 
Example #19
Source File: ptb_enas_controller.py    From enas with Apache License 2.0
def build_trainer(self, child_model):
    # actor
    self.valid_loss = tf.to_float(child_model.rl_loss)
    self.valid_loss = tf.stop_gradient(self.valid_loss)
    self.valid_ppl = tf.exp(self.valid_loss)
    self.reward = 80.0 / self.valid_ppl

    if self.entropy_weight is not None:
      self.reward += self.entropy_weight * self.sample_entropy

    # or baseline
    self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    baseline_update = tf.assign_sub(
      self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))

    with tf.control_dependencies([baseline_update]):
      self.reward = tf.identity(self.reward)
    self.loss = self.sample_log_probs * (self.reward - self.baseline)

    self.train_step = tf.Variable(
        0, dtype=tf.int32, trainable=False, name="train_step")
    tf_variables = [var
        for var in tf.trainable_variables() if var.name.startswith(self.name)]

    self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
      self.loss,
      tf_variables,
      self.train_step,
      clip_mode=self.clip_mode,
      grad_bound=self.grad_bound,
      l2_reg=self.l2_reg,
      lr_init=self.lr_init,
      lr_dec_start=self.lr_dec_start,
      lr_dec_every=self.lr_dec_every,
      lr_dec_rate=self.lr_dec_rate,
      optim_algo=self.optim_algo,
      sync_replicas=self.sync_replicas,
      num_aggregate=self.num_aggregate,
      num_replicas=self.num_replicas) 
Example #20
Source File: sg_optimize.py    From sugartensor with MIT License
def _apply_dense(self, grad, var):
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = grad / m_t

        var_update = tf.assign_sub(var, lr_t * g_t)
        return tf.group(*[var_update, m_t]) 
Example #21
Source File: tensorflow_backend.py    From GraphicDesignPatternByPython with MIT License
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    # Arguments
        x: A `Variable`.
        decrement: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated.
    """
    return tf.assign_sub(x, decrement) 
Example #22
Source File: dense_update_ops_test.py    From deep_image_model with Apache License 2.0
def _initAssignSubFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param -= y."""
    with self.test_session(use_gpu=use_gpu):
      p = tf.Variable(x)
      sub = tf.assign_sub(p, y)
      p.initializer.run()
      new_value = sub.eval()
      return p.eval(), new_value 
Example #23
Source File: variable_ops_test.py    From deep_image_model with Apache License 2.0
def testAssignUpdate(self):
    var = state_ops.variable_op([1, 2], tf.float32)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape()) 
Example #24
Source File: variable_ops_test.py    From deep_image_model with Apache License 2.0
def testAssignUpdateNoVarShape(self):
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())