Python tensorflow.python.layers.core.Dropout() Examples
The following are 11 code examples of tensorflow.python.layers.core.Dropout(), drawn from open-source projects; each example notes its source file, originating project, and license.
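Before the examples, here is a minimal usage sketch (assuming a TF 1.x graph-mode environment; the tensor shape is illustrative) of the internal layer that all of the examples below wrap:

import tensorflow as tf
from tensorflow.python.layers import core as core_layers

x = tf.placeholder(tf.float32, shape=[None, 128])
layer = core_layers.Dropout(rate=0.5)     # drop each unit with probability 0.5
y_train = layer.apply(x, training=True)   # mask applied, survivors scaled by 1/0.5
y_infer = layer.apply(x, training=False)  # identity: inputs pass through unchanged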
Example #1
Source File: layers.py from tensornets (MIT License)

def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True,
            outputs_collections=None, scope=None, seed=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned unchanged.
    outputs_collections: Collection to add the outputs to.
    scope: Optional scope for name_scope.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs],
      custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core_layers.Dropout(
        rate=1 - keep_prob,
        noise_shape=noise_shape,
        seed=seed,
        name=sc.name,
        _scope=sc)
    outputs = layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
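A hypothetical call to this wrapper (the `net` and `is_training` names are assumed, not part of the source) shows the keep_prob convention in use; the wrapper translates it to the layer's rate as rate = 1 - keep_prob:

is_training = tf.placeholder_with_default(True, shape=[])
net = dropout(net, keep_prob=0.8, is_training=is_training, scope='drop1')
# keep_prob=0.8 becomes core_layers.Dropout(rate=0.2): 20% of activations are
# zeroed during training and the survivors are scaled by 1 / 0.8 = 1.25.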
Example #2
Source File: core.py from lambda-packs (MIT License)

def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
  self.supports_masking = True
  # Inheritance call order:
  # 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer
  super(Dropout, self).__init__(
      rate=rate, noise_shape=noise_shape, seed=seed, **kwargs)
Example #3
Source File: core.py from lambda-packs (MIT License)

def call(self, inputs, training=None):
  # Fall back to the Keras learning phase when `training` is not given
  # explicitly, so the layer works inside Keras training loops.
  if training is None:
    training = K.learning_phase()
  output = super(Dropout, self).call(inputs, training=training)
  if training is K.learning_phase():
    # Mark the output as conditioned on the learning-phase flag.
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
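The learning-phase fallback means a layer invoked without an explicit training flag follows the Keras backend's global phase; a hedged sketch of the resulting behavior:

y = Dropout(0.5)(x)  # `training` not passed, so K.learning_phase() decides
# During Model.fit the learning phase is 1 and dropout is active; during
# Model.predict it is 0 and the layer behaves as an identity.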
Example #4
Source File: core.py from lambda-packs (MIT License)

def get_config(self):
  # Only the dropout-specific hyperparameter is serialized here; the base
  # class contributes `name`, `trainable`, and the other common fields.
  config = {'rate': self.rate}
  base_config = super(Dropout, self).get_config()
  return dict(list(base_config.items()) + list(config.items()))
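As an illustration of what get_config enables (from_config is the standard Keras Layer counterpart; the exact config keys beyond 'rate' come from the base class):

layer = Dropout(rate=0.3)
config = layer.get_config()           # includes 'rate': 0.3 plus base-class fields
clone = Dropout.from_config(config)   # a fresh layer with the same hyperparameters

Note that because only 'rate' is added to the config here, noise_shape and seed are not round-tripped by this implementation.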
Example #5
Source File: layers.py from lambda-packs (MIT License)

def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True,
            outputs_collections=None, scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned unchanged.
    outputs_collections: Collection to add the outputs to.
    scope: Optional scope for name_scope.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs],
      custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core_layers.Dropout(
        rate=1 - keep_prob, noise_shape=noise_shape, name=sc.name, _scope=sc)
    outputs = layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(
        outputs_collections, sc.original_name_scope, outputs)
Example #6
Source File: layers.py from auto-alt-text-lambda-api (MIT License)

def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True,
            outputs_collections=None, scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned unchanged.
    outputs_collections: Collection to add the outputs to.
    scope: Optional scope for name_scope.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs],
      custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core_layers.Dropout(
        rate=1 - keep_prob, noise_shape=noise_shape, name=sc.name, _scope=sc)
    outputs = layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(
        outputs_collections, sc.original_name_scope, outputs)
Example #7
Source File: layers.py from tf-slim (Apache License 2.0)

def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True,
            outputs_collections=None, scope=None, seed=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned unchanged.
    outputs_collections: Collection to add the outputs to.
    scope: Optional scope for name_scope.
    seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs],
      custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core_layers.Dropout(
        rate=1 - keep_prob,
        noise_shape=noise_shape,
        seed=seed,
        name=sc.name,
        _scope=sc)
    outputs = layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
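The seed parameter is what distinguishes this variant (and Example #1) from Examples #5 and #6; a minimal sketch, assuming the wrapper above is in scope and x is a float tensor:

tf.compat.v1.set_random_seed(1234)                          # graph-level seed
out = dropout(x, keep_prob=0.5, is_training=True, seed=42)  # op-level seed
# With both the graph-level and op-level seeds fixed, the generated dropout
# mask is reproducible across sessions.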
Example #8
Source File: core.py from Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)

def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
  self.supports_masking = True
  # Inheritance call order:
  # 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer
  super(Dropout, self).__init__(
      rate=rate, noise_shape=noise_shape, seed=seed, **kwargs)
Example #9
Source File: core.py from Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)

def call(self, inputs, training=None):
  # Fall back to the Keras learning phase when `training` is not given
  # explicitly, so the layer works inside Keras training loops.
  if training is None:
    training = K.learning_phase()
  output = super(Dropout, self).call(inputs, training=training)
  if training is K.learning_phase():
    # Mark the output as conditioned on the learning-phase flag.
    output._uses_learning_phase = True  # pylint: disable=protected-access
  return output
Example #10
Source File: core.py from Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda (MIT License)

def get_config(self):
  # Only the dropout-specific hyperparameter is serialized here; the base
  # class contributes `name`, `trainable`, and the other common fields.
  config = {'rate': self.rate}
  base_config = super(Dropout, self).get_config()
  return dict(list(base_config.items()) + list(config.items()))
Example #11
Source File: layers.py from keras-lambda (MIT License)

def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True,
            outputs_collections=None, scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned unchanged.
    outputs_collections: Collection to add the outputs to.
    scope: Optional scope for name_scope.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs],
      custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    layer = core_layers.Dropout(
        rate=1 - keep_prob, noise_shape=noise_shape, name=sc.name, _scope=sc)
    outputs = layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(
        outputs_collections, sc.original_name_scope, outputs)
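All five wrapper variants above (Examples #1, #5, #6, #7, and #11) perform the same convention translation, which is worth spelling out once with concrete, illustrative values:

keep_prob = 0.8
rate = 1 - keep_prob        # 0.2: fraction of units dropped during training
scale = 1.0 / keep_prob     # 1.25: factor applied to the surviving units
# Expected value check: keep_prob * scale * x + (1 - keep_prob) * 0 == x,
# so the expected sum of activations is unchanged, as the docstrings state.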