Python tensorflow.python.training.slot_creator.create_zeros_slot() Examples
The following are 19 code examples of tensorflow.python.training.slot_creator.create_zeros_slot(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes of the tensorflow.python.training.slot_creator module.
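Before the project examples, here is a minimal standalone sketch of the call itself. It is not taken from any project below, and it assumes TF 1.x graph mode; note that slot_creator is an internal TensorFlow module, so its import path is not covered by public-API stability guarantees.

import tensorflow as tf
from tensorflow.python.training import slot_creator

# A "slot" is an auxiliary variable attached to a primary variable, e.g. an
# optimizer accumulator. create_zeros_slot builds one with the same shape
# (and, by default, dtype) as the primary, initialized to zeros.
v = tf.Variable([1.0, 2.5], name="weights")
slot = slot_creator.create_zeros_slot(v, name="accumulator")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(slot.op.name)    # weights/accumulator
    print(sess.run(slot))  # [0. 0.]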
Example #1
Source File: optimizer.py From auto-alt-text-lambda-api with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]
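For context, this helper is normally called from an optimizer's _create_slots, once per trainable variable, and the slot is later fetched with get_slot. A hedged sketch of a custom TF 1.x optimizer doing exactly that; the class name, the "momentum" slot name, and the hyperparameters are illustrative, not from the project above (and it handles dense gradients only):

from tensorflow.python.training import optimizer

class MyMomentumOptimizer(optimizer.Optimizer):
    def __init__(self, learning_rate=0.01, momentum=0.9,
                 use_locking=False, name="MyMomentum"):
        super(MyMomentumOptimizer, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._momentum = momentum

    def _create_slots(self, var_list):
        # One zero-initialized accumulator per variable, backed by
        # _zeros_slot/create_zeros_slot as shown above.
        for v in var_list:
            self._zeros_slot(v, "momentum", self._name)

    def _apply_dense(self, grad, var):
        accum = self.get_slot(var, "momentum")
        accum_t = accum.assign(self._momentum * accum + grad,
                               use_locking=self._use_locking)
        return var.assign_sub(self._lr * accum_t,
                              use_locking=self._use_locking)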
Example #2
Source File: optimizer.py From keras-lambda with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]
Example #3
Source File: optimizer.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  named_slots = self._slot_dict(slot_name)
  # Unlike Examples #1-#2, this version keys the slot dict through the
  # module-level _var_key() helper (not shown here), which maps a variable
  # to a hashable key so the lookup also works where Variable objects
  # themselves are not suitable dict keys (e.g. under eager execution).
  if _var_key(var) not in named_slots:
    named_slots[_var_key(var)] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[_var_key(var)]
Example #4
Source File: optimizer.py From deep_image_model with Apache License 2.0
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]
Example #5
Source File: optimizer.py From lambda-packs with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  """Find or create a slot initialized with 0.0.

  Args:
    var: A `Variable` object.
    slot_name: Name for the slot.
    op_name: Name to use when scoping the Variable that
      needs to be created for the slot.

  Returns:
    A `Variable` object.
  """
  named_slots = self._slot_dict(slot_name)
  if _var_key(var) not in named_slots:
    named_slots[_var_key(var)] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[_var_key(var)]
Example #6
Source File: slot_creator_test.py From deep_image_model with Apache License 2.0
def testCreateZerosSlotFromVariable(self):
  with self.test_session():
    v = tf.Variable([1.0, 2.5], name="var")
    with tf.control_dependencies(None):
      slot = slot_creator.create_zeros_slot(v, name="slot", dtype=tf.float64)
    tf.global_variables_initializer().run()

    self.assertEqual(slot.op.name, "var/slot")
    self.assertEqual(slot.get_shape().as_list(), [2])
    self.assertEqual(slot.dtype.base_dtype, tf.float64)
    self.assertAllEqual(slot.eval(), [0.0, 0.0])
Example #7
Source File: slot_creator_test.py From deep_image_model with Apache License 2.0
def testCreateZerosSlotFromTensor(self):
  with self.test_session():
    v = tf.constant([1.0, 2.5], name="const")
    with tf.control_dependencies(None):
      slot = slot_creator.create_zeros_slot(v, name="slot")
    tf.global_variables_initializer().run()

    self.assertEqual(slot.op.name, "const/slot")
    self.assertEqual(slot.get_shape().as_list(), [2])
    self.assertEqual(slot.dtype.base_dtype, tf.float32)
    self.assertAllEqual(slot.eval(), [0.0, 0.0])
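Taken together, these two tests pin down the behavior: the primary may be a Variable or a plain Tensor, the slot is named <primary>/<name>, it inherits the primary's shape, and its dtype defaults to the primary's dtype unless overridden. In the TF 1.x sources the signature is create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True).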
Example #8
Source File: rmsprop_applier.py From a3c-distributed_tensorflow with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
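These A3C-style RMSPropApplier classes mimic tf.train.Optimizer's slot machinery without subclassing it; _zeros_slot is called from their _create_slots when the applier sets up per-variable state. As a hedged sketch only (the slot names "ms" and "mom" follow RMSProp convention, but the exact initialization differs between the forks listed here):

def _create_slots(self, var_list):
  # Hedged sketch, not copied from any single fork above: RMSProp keeps a
  # mean-square accumulator ("ms") and a momentum accumulator ("mom") for
  # each variable; the momentum slot is the zeros slot created above.
  for v in var_list:
    val = tf.ones_like(v.initialized_value())
    self._get_or_make_slot(v, val, "ms", self._name)
    self._zeros_slot(v, "mom", self._name)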
Example #9
Source File: rmsprop_applier.py From a3c-distributed_tensorflow with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #10
Source File: rmsprop_applier.py From Deep-RL-agents with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #11
Source File: rmsprop_applier.py From Deep-RL-agents with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #12
Source File: rmsprop_applier.py From Deep-RL-agents with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #13
Source File: RMSPropApplier.py From Deep-RL-agents with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #14
Source File: rmsprop_applier.py From async_deep_reinforce with Apache License 2.0
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #15
Source File: rmsprop_applier.py From pathnet with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #16
Source File: rmsprop_applier.py From thor-iqa-cvpr-2018 with Apache License 2.0
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #17
Source File: rmsprop_applier.py From icra2017-visual-navigation with MIT License
def _zeros_slot(self, var, slot_name, op_name):
  named_slots = self._slot_dict(slot_name)
  if var not in named_slots:
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  return named_slots[var]

# TODO: in RMSProp native code, memcpy() (for CPU) and
# cudaMemcpyAsync() (for GPU) are used when updating values,
# and values might tend to be overwritten with results from other threads.
# (Need to check the learning performance with replacing it)
Example #18
Source File: qhadam.py From qhoptim with MIT License
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_weight, beta2_weight = self._get_beta_weights()

    learning_rate_tensor = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
    beta1_tensor = math_ops.cast(self._beta1_tensor, var.dtype.base_dtype)
    beta2_tensor = math_ops.cast(self._beta2_tensor, var.dtype.base_dtype)
    nu1_tensor = math_ops.cast(self._nu1_tensor, var.dtype.base_dtype)
    nu2_tensor = math_ops.cast(self._nu2_tensor, var.dtype.base_dtype)
    epsilon_tensor = math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype)

    beta1_weight = math_ops.cast(beta1_weight, var.dtype.base_dtype) * beta1_tensor + 1.0
    beta2_weight = math_ops.cast(beta2_weight, var.dtype.base_dtype) * beta2_tensor + 1.0

    beta1_adj = 1.0 - (1.0 / beta1_weight)
    beta2_adj = 1.0 - (1.0 / beta2_weight)

    exp_avg = self.get_slot(var, "exp_avg")
    exp_avg_sq = self.get_slot(var, "exp_avg_sq")

    grad_sq = grad * grad

    exp_avg_tensor = state_ops.assign(exp_avg, beta1_adj * exp_avg, use_locking=self._use_locking)
    with ops.control_dependencies([exp_avg_tensor]):
        exp_avg_tensor = scatter_add(exp_avg, indices, (1.0 - beta1_adj) * grad)

    exp_avg_sq_tensor = state_ops.assign(exp_avg_sq, beta2_adj * exp_avg_sq, use_locking=self._use_locking)
    with ops.control_dependencies([exp_avg_sq_tensor]):
        exp_avg_sq_tensor = scatter_add(exp_avg_sq, indices, (1.0 - beta2_adj) * grad_sq)

    avg_grad = slot_creator.create_zeros_slot(var, self._name)
    avg_grad_tensor = state_ops.assign(avg_grad, nu1_tensor * exp_avg_tensor, use_locking=self._use_locking)
    with ops.control_dependencies([avg_grad_tensor]):
        avg_grad_tensor = scatter_add(avg_grad, indices, (1.0 - nu1_tensor) * grad)

    avg_grad_sq = slot_creator.create_zeros_slot(var, self._name)
    avg_grad_sq_tensor = state_ops.assign(
        avg_grad_sq, nu2_tensor * exp_avg_sq_tensor, use_locking=self._use_locking
    )
    with ops.control_dependencies([avg_grad_sq_tensor]):
        avg_grad_sq_tensor = scatter_add(avg_grad_sq, indices, (1.0 - nu2_tensor) * grad_sq)

    avg_grad_rms_tensor = math_ops.sqrt(avg_grad_sq_tensor)

    var_update = state_ops.assign_add(
        var,
        -learning_rate_tensor * avg_grad_tensor / (avg_grad_rms_tensor + epsilon_tensor),
        use_locking=self._use_locking,
    )

    return control_flow_ops.group(*[var_update, exp_avg_tensor, exp_avg_sq_tensor])
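For orientation, the two create_zeros_slot calls above allocate scratch slots for the nu-interpolated moments. Transcribing the code into a formula, with \widehat{m}_t and \widehat{v}_t the bias-corrected moving averages of g_t and g_t^2 kept in the "exp_avg" and "exp_avg_sq" slots, the update is the quasi-hyperbolic Adam (QHAdam) rule:

\theta_{t+1} = \theta_t - \alpha \cdot \frac{(1-\nu_1)\, g_t + \nu_1\, \widehat{m}_t}{\sqrt{(1-\nu_2)\, g_t^2 + \nu_2\, \widehat{v}_t} + \epsilon}

Setting \nu_1 = \nu_2 = 1 recovers the plain Adam update.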
Example #19
Source File: stochastic_weight_averaging.py From swa-tf with BSD 2-Clause "Simplified" License
def apply(self, var_list=None):
    if var_list is None:
        var_list = variables.trainable_variables()

    for var in var_list:
        if var.dtype.base_dtype not in [dtypes.float16, dtypes.float32, dtypes.float64]:
            raise TypeError("The variables must be half, float, or double: %s" % var.name)

        if var not in self._averages:
            # For variables: to lower communication bandwidth across devices we keep
            # the moving averages on the same device as the variables. For other
            # tensors, we rely on the existing device allocation mechanism.
            with ops.init_scope():
                if isinstance(var, variables.Variable):
                    avg = slot_creator.create_slot(var,
                                                   var.initialized_value(),
                                                   self.name,
                                                   colocate_with_primary=True)
                    # NOTE(mrry): We only add `tf.Variable` objects to the
                    # `MOVING_AVERAGE_VARIABLES` collection.
                    ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
                else:
                    avg = slot_creator.create_zeros_slot(
                        var,
                        self.name,
                        colocate_with_primary=(var.op.type in ["Variable",
                                                               "VariableV2",
                                                               "VarHandleOp"]))
            self._averages[var] = avg

    with ops.device('/cpu:0'):
        self._n_models = variable_scope.get_variable(shape=[],
                                                     dtype=dtypes.float32,
                                                     name='n_models',
                                                     initializer=init_ops.constant_initializer(0.),
                                                     trainable=False)

    with ops.name_scope(self.name) as scope:
        updates = []
        for var in var_list:
            updates.append(assign_stochastic_average(self._averages[var], var, self._n_models))
        with ops.control_dependencies(updates):
            update_n_models = state_ops.assign_add(self._n_models, 1., name=scope)
        return update_n_models
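The helper assign_stochastic_average is defined elsewhere in swa-tf and is not part of this excerpt. As a hedged guess at its semantics (this sketch is mine, not the project's code): stochastic weight averaging keeps a running mean over model snapshots, so with n_models snapshots already folded in, the natural update is avg <- (avg * n + var) / (n + 1), after which apply() increments n_models as shown above.

from tensorflow.python.ops import state_ops

def assign_stochastic_average(avg_var, var, n_models):
    # Hypothetical sketch of the helper used in apply(): an incremental
    # running mean over snapshots. After this op runs, avg_var holds the
    # mean of n_models + 1 snapshots.
    return state_ops.assign(avg_var,
                            (avg_var * n_models + var) / (n_models + 1.0))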