Python tensorflow.python.layers.core.dropout() Examples

The following are five code examples of tensorflow.python.layers.core.dropout(), collected from open-source projects. Follow the links above each example to visit the original project or source file. You may also want to browse all available functions and classes of the module tensorflow.python.layers.core, or try the search function.
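Before the project examples, here is a minimal sketch of the function itself (assuming TF 1.x, where tensorflow.python.layers.core.dropout is the functional form also exposed as tf.layers.dropout). Note that it takes a drop rate rather than a keep probability, and that training defaults to False, in which case the input passes through unchanged.

import tensorflow as tf
from tensorflow.python.layers import core as core_layers

x = tf.placeholder(tf.float32, shape=[None, 128])
is_training = tf.placeholder(tf.bool, shape=[])

# rate is the fraction of units to drop; when training is False the op is the identity.
y = core_layers.dropout(x, rate=0.5, training=is_training)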
Example #1
Source File: convnet_builder.py    From benchmarks with The Unlicense
def dropout(self, keep_prob=0.5, input_layer=None):
    if input_layer is None:
      input_layer = self.top_layer
    else:
      self.top_size = None
    name = 'dropout' + str(self.counts['dropout'])
    with tf.variable_scope(name):
      # Disable dropout entirely outside of training.
      if not self.phase_train:
        keep_prob = 1.0
      if self.use_tf_layers:
        # tf.layers-style dropout takes a drop *rate*, hence 1 - keep_prob.
        dropout = core_layers.dropout(input_layer, 1. - keep_prob,
                                      training=self.phase_train)
      else:
        # tf.nn.dropout takes a *keep* probability directly.
        dropout = tf.nn.dropout(input_layer, keep_prob)
      self.top_layer = dropout
      return dropout
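This method is meant for call-chaining on the builder's implicit top_layer. A hedged usage sketch (the sibling ConvNetBuilder methods conv, mpool, and affine are assumed from the same file; the layer sizes are illustrative):

# `cnn` is a ConvNetBuilder instance whose phase_train/use_tf_layers
# flags were set at construction time.
cnn.conv(64, 3, 3)
cnn.mpool(2, 2)
cnn.dropout(keep_prob=0.8)  # drops 20% of activations during training
cnn.affine(10)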
Example #2
Source File: small.py    From dirt-t with MIT License
def classifier(x, phase, enc_phase=1, trim=0, scope='class', reuse=None, internal_update=False, getter=None):
    with tf.variable_scope(scope, reuse=reuse, custom_getter=getter):
        with arg_scope([leaky_relu], a=0.1), \
             arg_scope([conv2d, dense], activation=leaky_relu, bn=True, phase=phase), \
             arg_scope([batch_norm], internal_update=internal_update):

            preprocess = instance_norm if args.inorm else tf.identity
            layout = [
                (preprocess, (), {}),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (max_pool, (2, 2), {}),
                (dropout, (), dict(training=phase)),
                (noise, (1,), dict(phase=phase)),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (max_pool, (2, 2), {}),
                (dropout, (), dict(training=phase)),
                (noise, (1,), dict(phase=phase)),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (conv2d, (64, 3, 1), {}),
                (avg_pool, (), dict(global_pool=True)),
                (dense, (args.Y,), dict(activation=None))
            ]

            # enc_phase selects the encoder portion of the layout; otherwise
            # only the trailing `trim` layers (the classifier head) are built.
            if enc_phase:
                start = 0
                end = len(layout) - trim
            else:
                start = len(layout) - trim
                end = len(layout)

            for i in range(start, end):
                with tf.variable_scope('l{:d}'.format(i)):
                    f, f_args, f_kwargs = layout[i]
                    x = f(x, *f_args, **f_kwargs)

    return x 
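In the DIRT-T code this function comes from, trim splits the layout into an encoder and a classifier head. A hedged sketch of that call pattern (the trim value is illustrative, not taken from the repo):

feats = classifier(x, phase, enc_phase=1, trim=3, scope='class')       # layers 0 .. len(layout)-4
logits = classifier(feats, phase, enc_phase=0, trim=3, scope='class')  # final 3 layers only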
Example #3
Source File: convnet_builder.py    From parallax with Apache License 2.0
def dropout(self, keep_prob=0.5, input_layer=None):
    if input_layer is None:
        input_layer = self.top_layer
    else:
        self.top_size = None
    name = 'dropout' + str(self.counts['dropout'])
    with tf.variable_scope(name):
        if not self.phase_train:
            keep_prob = 1.0
        if self.use_tf_layers:
            # NOTE: no training argument is passed here; tf.layers.dropout
            # defaults to training=False, so this branch is an identity op
            # and dropout is never actually applied on this path.
            dropout = core_layers.dropout(input_layer, 1. - keep_prob)
        else:
            dropout = tf.nn.dropout(input_layer, keep_prob)
        self.top_layer = dropout
        return dropout
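Unlike Example #1, no training argument is passed to core_layers.dropout here. A quick check of that default behaviour (a minimal sketch, assuming TF 1.x):

import tensorflow as tf
from tensorflow.python.layers import core as core_layers

x = tf.ones([4, 4])
noop = core_layers.dropout(x, rate=0.5)                  # training defaults to False
active = core_layers.dropout(x, rate=0.5, training=True)

with tf.Session() as sess:
    print(sess.run(noop))    # identical to x
    print(sess.run(active))  # ~half the entries zeroed, the rest scaled by 2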
Example #4
Source File: tfm_builder_densenet.py    From Centripetal-SGD with Apache License 2.0
def _dropout(self, bottom, drop_rate):
    # Thin wrapper: `rate` is the drop probability; `self.training` toggles it.
    return dropout(bottom, rate=drop_rate, training=self.training)
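A hedged usage sketch (the caffe-style bottom argument and the builder's self.training flag come from the method above; the call site and rate are illustrative):

# Inside a tfm_builder-style model definition, after a conv/BN block:
net = self._dropout(net, drop_rate=0.2)  # zeroes 20% of activations when self.training is True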
Example #5
Source File: dnn.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Adagrad',
               activation_fn=nn.relu,
               dropout=None,
               input_layer_partitioner=None,
               config=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: Iterable of the number of hidden units per layer. All
        layers are fully connected. Ex. `[64, 32]` means the first layer has
        64 nodes and the second has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `_FeatureColumn`.
      model_dir: Directory in which to save model parameters, graph, etc. This
        can also be used to load checkpoints from the directory into an
        estimator to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining the feature column that
        represents weights. It is used to down-weight or boost examples during
        training and is multiplied by the loss of the example. If it is a
        string, it is used as a key to fetch the weight tensor from `features`.
        If it is a `_NumericColumn`, the raw tensor is fetched by the key
        `weight_column.key`, then `weight_column.normalizer_fn` is applied to
        it to produce the weight tensor.
      optimizer: An instance of `tf.Optimizer` used to train the model.
        Defaults to the Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability that a given coordinate will
        be dropped.
      input_layer_partitioner: Optional. Partitioner for input layer. Defaults
        to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: `RunConfig` object to configure the runtime settings.
    """
    def _model_fn(features, labels, mode, config):
      return _dnn_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head_lib.  # pylint: disable=protected-access
          _regression_head_with_mean_squared_error_loss(
              label_dimension=label_dimension, weight_column=weight_column),
          hidden_units=hidden_units,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          activation_fn=activation_fn,
          dropout=dropout,
          input_layer_partitioner=input_layer_partitioner,
          config=config)
    super(DNNRegressor, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
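A hedged construction sketch for the class above (the feature spec, layer sizes, and dropout value are illustrative; assumes a TF 1.x tf.estimator-style environment):

import tensorflow as tf

feature_columns = [tf.feature_column.numeric_column('x', shape=[16])]
regressor = DNNRegressor(
    hidden_units=[64, 32],          # two fully connected layers
    feature_columns=feature_columns,
    dropout=0.1,                    # drop each coordinate with probability 0.1
    model_dir='/tmp/dnn_regressor')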