Python tensorflow.realdiv() Examples
The following are 7 code examples of tensorflow.realdiv(), collected from open-source projects. Each example lists its source file, originating project, and license.
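tf.realdiv computes elementwise real (floating-point) division with the usual broadcasting rules. A minimal sketch of the call itself (illustrative values, not taken from any of the projects below):

import tensorflow as tf

x = tf.constant([3.0, 4.0, 10.0])
y = tf.constant([2.0, 4.0, 4.0])
z = tf.realdiv(x, y)  # elementwise x / y -> [1.5, 1.0, 2.5]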
Example #1
Source File: optimizers.py From meta-optim-public with MIT License
def lr_decay(init_lr, decay, time_const, global_step):
    """Gets a decayed learning rate using inverse decay rule.

    Args:
        init_lr: Initial learning rate at step 0.
        decay: Decay exponent.
        time_const: Time constant of the inverse decay rule.
        global_step: Time step.

    Returns:
        lr: Learning rate at the current time step.
    """
    time_const_f = tf.constant(time_const, dtype=tf.float32)
    decay_step = tf.cast(global_step, tf.float32)
    lr = tf.realdiv(init_lr,
                    tf.pow(1.0 + tf.realdiv(decay_step, time_const_f), decay))
    return lr
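A hypothetical call, with illustrative hyperparameter values (the numbers here are assumptions, not from the project):

# Inverse decay: lr_t = init_lr / (1 + t / time_const) ** decay
step = tf.constant(1000, dtype=tf.int64)
lr = lr_decay(init_lr=0.1, decay=0.5, time_const=250.0, global_step=step)
# 0.1 / (1 + 1000 / 250) ** 0.5 = 0.1 / sqrt(5) ≈ 0.0447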
Example #2
Source File: refinement_dataset.py From inc-few-shot-attractor-public with MIT License
def tf_preprocess(self,
                  image_size=84,
                  crop_size=92,
                  random_crop=True,
                  random_flip=True,
                  random_color=True,
                  whiten=False):
    inp = tf.placeholder(tf.uint8, [None, None, 3])
    # Scale pixel values from [0, 255] to [0, 1].
    image = tf.realdiv(tf.cast(inp, tf.float32), 255.0)
    # image = debug_identity(image)
    if random_crop:
        log.info("Apply random cropping")
        image = tf.image.resize_image_with_crop_or_pad(image, crop_size,
                                                       crop_size)
        image = tf.random_crop(image, [image_size, image_size, 3])
    else:
        image = tf.image.resize_image_with_crop_or_pad(image, image_size,
                                                       image_size)
    if random_flip:
        log.info("Apply random flipping")
        image = tf.image.random_flip_left_right(image)
    # Brightness/saturation/contrast provides small gains .2%~.5% on cifar.
    if random_color:
        image = tf.image.random_brightness(image, max_delta=63. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    if whiten:
        log.info("Apply whitening")
        # NB: per_image_whitening is the pre-1.0 name of
        # tf.image.per_image_standardization.
        image = tf.image.per_image_whitening(image)
    return inp, image
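A sketch of driving the resulting graph in a TF 1.x session; `dataset` standing in for a refinement-dataset instance is an assumption, as is the random input image:

import numpy as np
import tensorflow as tf

inp, image = dataset.tf_preprocess(image_size=84, crop_size=92)
with tf.Session() as sess:
    raw = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
    out = sess.run(image, feed_dict={inp: raw})
    # out is float32 with shape [84, 84, 3], roughly in [0, 1]
    # (random brightness/contrast can push values slightly outside).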
Example #3
Source File: streaming_metrics.py From MAX-Image-Segmenter with Apache License 2.0
def _realdiv_maybe_zero(x, y):
    """Support tf.realdiv(x, y) where y may contain zeros."""
    return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))
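An illustrative check of the zero guard; the value of _EPSILON is an assumption (the module defines it elsewhere):

_EPSILON = 1e-7  # assumed; defined at module level in the original source

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])
safe = _realdiv_maybe_zero(num, den)  # -> [0.5, 0.0, 0.75]
# Note the guard fires for any y below _EPSILON, negatives included.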
Example #4
Source File: optimizers.py From meta-optim-public with MIT License
def adam(grads, velocity_m, velocity_v, var_list, lr, beta1, beta2, epsilon):
    """ADAM update.

    Args:
        grads: List of gradients of the trainable variables.
        velocity_m: List of first-moment estimates of the trainable variables.
        velocity_v: List of second-moment estimates of the trainable variables.
        var_list: List of variables to be optimized.
        lr: Learning rate.
        beta1: First momentum.
        beta2: Second momentum.
        epsilon: Small constant for numerical stability.

    Returns:
        var_list_new: List of new variables to be assigned.
        velocity_m_new: List of new velocity_m to be assigned.
        velocity_v_new: List of new velocity_v to be assigned.
    """
    velocity_m_new = [
        beta1 * mm + (1 - beta1) * gg for gg, mm in zip(grads, velocity_m)
    ]
    velocity_v_new = [
        beta2 * vv + (1 - beta2) * gg * gg for gg, vv in zip(grads, velocity_v)
    ]
    var_list_new = [
        var - tf.realdiv(lr * mm, tf.sqrt(vv) + epsilon)
        for var, mm, vv in zip(var_list, velocity_m_new, velocity_v_new)
    ]
    return var_list_new, velocity_m_new, velocity_v_new
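One hypothetical step on a single scalar variable (all values illustrative). Note this helper applies no bias correction; Example #6 below folds the correction into the learning rate instead:

var = tf.constant(1.0)
grad = tf.constant(0.5)
m0 = tf.zeros_like(var)
v0 = tf.zeros_like(var)
new_vars, new_m, new_v = adam(
    grads=[grad], velocity_m=[m0], velocity_v=[v0], var_list=[var],
    lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8)
# new_m[0] = 0.1 * 0.5 = 0.05
# new_v[0] = 0.001 * 0.25 = 0.00025
# new_vars[0] = 1.0 - 0.001 * 0.05 / (sqrt(0.00025) + 1e-8) ≈ 0.9968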
Example #5
Source File: optimizers.py From meta-optim-public with MIT License
def minimize(self, cost, var_list=None, global_step=None, gate_gradients=1):
    """See above in class Optimizer."""
    if var_list is None:
        var_list = tf.trainable_variables()
    self._var_list = var_list
    if global_step is None:
        global_step = tf.get_variable(
            'global_step', [],
            dtype=tf.int64,
            initializer=tf.constant_initializer(0, dtype=tf.int64),
            trainable=False)
    grads = tf.gradients(cost, var_list, gate_gradients=gate_gradients)
    self._grads = grads
    self._lr, self._mom = self.reparameterize(self.hyperparams['lr'],
                                              self.hyperparams['mom'])
    # Learning rate decay.
    decay = self.hyperparams['decay']
    t = tf.cast(global_step, self.dtype)
    time_const_f = tf.constant(self._time_const, dtype=self.dtype)
    self._lr = self._lr * tf.pow(1.0 + tf.realdiv(t, time_const_f), -decay)
    grads = tf.gradients(cost, var_list, gate_gradients=True)
    return self.apply_gradients(
        list(zip(grads, var_list)), global_step=global_step)
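The schedule here is the same inverse-time rule as lr_decay in Example #1. A numeric sketch of the decay factor alone (illustrative values):

t = tf.constant(500.0)
time_const_f = tf.constant(250.0)
decay = 0.5
factor = tf.pow(1.0 + tf.realdiv(t, time_const_f), -decay)
# (1 + 500 / 250) ** -0.5 = 3 ** -0.5 ≈ 0.577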
Example #6
Source File: optimizers.py From meta-optim-public with MIT License
def minimize(self, cost, var_list=None, global_step=None, gate_gradients=1):
    """See above in class Optimizer."""
    if var_list is None:
        var_list = tf.trainable_variables()
    self._var_list = var_list
    if global_step is None:
        global_step = tf.get_variable(
            'global_step', [],
            dtype=tf.int64,
            initializer=tf.constant_initializer(0, dtype=tf.int64),
            trainable=False)
    grads = tf.gradients(cost, var_list, gate_gradients=gate_gradients)
    self._beta1, self._beta2 = self.reparameterize(self.hyperparams['beta1'],
                                                   self.hyperparams['beta2'])
    t = tf.cast(global_step, self.dtype) + 1.0
    # Adam bias-correction ratio, folded into the learning rate.
    ratio = tf.realdiv(
        tf.sqrt(1.0 - tf.pow(self._beta2, t)), (1.0 - tf.pow(self._beta1, t)))
    self._lr = self.hyperparams['lr'] * ratio
    grads = tf.gradients(cost, var_list, gate_gradients=True)
    self._grads = grads
    return self.apply_gradients(
        list(zip(grads, var_list)), global_step=global_step)
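The ratio is Adam's bias-correction term sqrt(1 - beta2^t) / (1 - beta1^t), applied to the learning rate rather than to the moment estimates. A numeric sketch at the first step (illustrative values):

beta1, beta2 = 0.9, 0.999
t = tf.constant(1.0)
ratio = tf.realdiv(tf.sqrt(1.0 - tf.pow(beta2, t)), 1.0 - tf.pow(beta1, t))
# sqrt(1 - 0.999) / (1 - 0.9) = sqrt(0.001) / 0.1 ≈ 0.316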
Example #7
Source File: streaming_metrics.py From models with Apache License 2.0
def _realdiv_maybe_zero(x, y):
    """Support tf.realdiv(x, y) where y may contain zeros."""
    return tf.where(tf.less(y, _EPSILON), tf.zeros_like(x), tf.realdiv(x, y))