Python tensorflow.contrib.layers.dropout() Examples
The following are 30 code examples of tensorflow.contrib.layers.dropout(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.layers, or try the search function.
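Before the examples, note the calling convention most of them share: layers.dropout(net, keep_prob, is_training=...) takes the probability of keeping a unit (not of dropping it) and acts as an identity op when is_training is False, so the same graph serves training and inference. Below is a minimal sketch of that pattern (TF 1.x); the function name, layer sizes, and input shape are illustrative only and not taken from any example on this page.

import tensorflow as tf
from tensorflow.contrib import layers

def tiny_classifier(inputs, num_classes, is_training):
    # inputs: a [batch_size, feature_dim] float tensor (illustrative shape)
    net = layers.fully_connected(inputs, 256, activation_fn=tf.nn.relu)
    # keep_prob=0.5 keeps half of the activations while training;
    # when is_training=False the activations pass through unchanged.
    net = layers.dropout(net, keep_prob=0.5, is_training=is_training)
    return layers.fully_connected(net, num_classes, activation_fn=None)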
Example #1
Source File: gait_nn.py From gait-recognition with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_arg_scope(is_training):
    weight_decay_l2 = 0.1
    batch_norm_decay = 0.999
    batch_norm_epsilon = 0.0001

    with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                        weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                        biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                        weights_initializer = layers.variance_scaling_initializer()):
        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon
        }
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training = is_training):
            with slim.arg_scope([slim.batch_norm],
                                **batch_norm_params):
                with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                    activation_fn = tf.nn.elu,
                                    normalizer_fn = slim.batch_norm,
                                    normalizer_params = batch_norm_params) as scope:
                    return scope
Example #2
Source File: shufflenet_v2.py From TF_Face_Toolbox with Apache License 2.0 | 6 votes |
def forward(self, images, num_classes=None, is_training=True):
    # Forward
    features, end_points = self.backbone(images, is_training=is_training)

    # Logits
    if is_training:
        assert num_classes is not None, 'num_classes must be given when is_training=True'
        with tf.variable_scope('classifier'):
            features_drop = layers.dropout(features, keep_prob=0.5, is_training=is_training)
            logit = layers.fully_connected(features_drop, num_classes,
                                           activation_fn=None,
                                           weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                           weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                           biases_initializer=None,
                                           scope='fc_classifier')
        logits = {}
        logits['logits'] = logit
        logits['features'] = features
        return logits
    else:
        # for _, var in end_points.items():
        #     print(var)
        return features
Example #3
Source File: resnet.py From TF_Face_Toolbox with Apache License 2.0 | 6 votes |
def forward(self, images, num_classes=None, is_training=True):
    assert num_classes is not None, 'num_classes must be given when is_training=True'
    # Forward
    features, _ = self.backbone(images, is_training=is_training)

    # Logits
    with tf.variable_scope('classifier'):
        features_drop = layers.dropout(features, keep_prob=0.5, is_training=is_training)
        logit = layers.fully_connected(features_drop, num_classes,
                                       activation_fn=None,
                                       weights_initializer=tf.random_normal_initializer(stddev=0.001),
                                       weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                       biases_initializer=None,
                                       scope='fc_classifier')
    logits = {}
    logits['logits'] = logit
    return logits
Example #4
Source File: humanEncoder.py From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License | 6 votes |
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if (reuse):
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)

        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)

        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

        return h0
Example #5
Source File: humanEncoder_ablation.py From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License | 6 votes |
def forward(self, dec_in, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if (reuse):
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)

        h0 = tcl.fully_connected(dec_in_enc, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)

        h0 = tcl.fully_connected(h0, 54, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

        return h0
Example #6
Source File: humanEncoder_cmu.py From Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics with MIT License | 6 votes |
def forward(self, decoder_hidden, dec_in, decoder_category, reuse=False, trainable=True, is_training=True):
    with tf.variable_scope(self.name_scope) as vs:
        if (reuse):
            vs.reuse_variables()
        lrelu = VAE.lrelu

        dec_in_enc = self.encoder.forward(dec_in, reuse=reuse, trainable=trainable, is_training=is_training)
        y = tf.concat([decoder_hidden, dec_in_enc], 1)

        h0 = tcl.fully_connected(y, 512, scope="fc3", activation_fn=lrelu,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tcl.dropout(h0, 0.5, is_training=is_training)

        h0 = tcl.fully_connected(h0, 70, scope="fc4", activation_fn=None,
                                 weights_regularizer=tcl.l2_regularizer(self.re_term))
        h0 = tf.expand_dims(tf.expand_dims(h0, 1), 3)

        return h0
Example #7
Source File: build_vgg.py From tensorflow-litterbox with Apache License 2.0 | 6 votes |
def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')

        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')

        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')

        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')

        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Example #8
Source File: utils.py From TensorFlow-VAE-GAN-DRAW with Apache License 2.0 | 6 votes |
def encoder(input_tensor, output_size):
    '''Create encoder network.

    Args:
        input_tensor: a batch of flattened images [batch_size, 28*28]

    Returns:
        A tensor that expresses the encoder network
    '''
    net = tf.reshape(input_tensor, [-1, 28, 28, 1])
    net = layers.conv2d(net, 32, 5, stride=2)
    net = layers.conv2d(net, 64, 5, stride=2)
    net = layers.conv2d(net, 128, 5, stride=2, padding='VALID')
    net = layers.dropout(net, keep_prob=0.9)
    net = layers.flatten(net)
    return layers.fully_connected(net, output_size, activation_fn=None)
Example #9
Source File: build_vgg.py From tensorflow-litterbox with Apache License 2.0 | 5 votes |
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints
Example #10
Source File: build_vgg.py From tensorflow-litterbox with Apache License 2.0 | 5 votes |
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):
                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)
                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints
Example #11
Source File: build_inception_v4.py From tensorflow-litterbox with Apache License 2.0 | 5 votes |
def _block_output(net, endpoints, num_classes=1000, dropout_keep_prob=0.5, scope='Output'):
    with tf.variable_scope(scope):
        # 8 x 8 x 1536
        shape = net.get_shape()
        net = layers.avg_pool2d(net, shape[1:3], padding='VALID', scope='Pool1_Global')
        endpoints['Output/Pool1'] = net
        # 1 x 1 x 1536
        net = layers.dropout(net, dropout_keep_prob)
        net = layers.flatten(net)
        # 1536
        net = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # num classes
        endpoints['Logits'] = net
    return net
Example #12
Source File: parsers.py From attend-copy-parse with MIT License | 5 votes |
def parse(self, x, context, is_training):
    with tf.variable_scope(self.scope):
        for i in range(4):
            x = tf.layers.conv1d(x, 128, 3, padding="same", activation=tf.nn.relu)  # (bs, 128, 128)
            x = tf.layers.max_pooling1d(x, 2, 2)  # (bs, 64-32-16-8, 128)

        x = tf.reduce_sum(x, axis=1)  # (bs, 128)
        x = tf.concat([x, context], axis=1)  # (bs, 256)

        for i in range(3):
            x = layers.fully_connected(x, 256)
            x = layers.dropout(x, is_training=is_training)

        x = layers.fully_connected(x, self.seq_out * self.n_out, activation_fn=None)
        return tf.reshape(x, (self.bs, self.seq_out, self.n_out))
Example #13
Source File: multiple_gpu.py From deep_image_model with Apache License 2.0 | 5 votes |
def my_model(features, target):
    """DNN with three hidden layers, and dropout of 0.1 probability.

    Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0
    and CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.

    Args:
        features: `Tensor` of input features.
        target: `Tensor` of targets.

    Returns:
        Tuple of predictions, loss and training op.
    """
    # Convert the target to a one-hot tensor of shape (length of features, 3) and
    # with a on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)

    # Create three fully connected layers respectively of size 10, 20, and 10 with
    # each layer having a dropout probability of 0.1.
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.5}
    with tf.device('/gpu:1'):
        features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                                normalizer_fn=normalizer_fn,
                                normalizer_params=normalizer_params)

    with tf.device('/gpu:2'):
        # Compute logits (1 per class) and compute loss.
        logits = layers.fully_connected(features, 3, activation_fn=None)
        loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

        # Create a tensor for training op.
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
            learning_rate=0.1)

    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)}, loss, train_op)
Example #14
Source File: iris_custom_model.py From deep_image_model with Apache License 2.0 | 5 votes |
def my_model(features, target):
    """DNN with three hidden layers, and dropout of 0.1 probability."""
    # Convert the target to a one-hot tensor of shape (length of features, 3) and
    # with a on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)

    # Create three fully connected layers respectively of size 10, 20, and 10 with
    # each layer having a dropout probability of 0.1.
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.9}
    features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params)

    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(features, 3, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    # Create a tensor for training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
        learning_rate=0.1)

    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)}, loss, train_op)
Example #15
Source File: mnist.py From deep_image_model with Apache License 2.0 | 5 votes |
def conv_model(feature, target, mode):
    """2-layer convolution model."""
    # Convert the target to a one-hot tensor of shape (batch_size, 10) and
    # with a on-value of 1 for each one-hot vector of length 10.
    target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

    # Reshape feature to 4d tensor with 2nd and 3rd dimensions being
    # image width and height final dimension being the number of color channels.
    feature = tf.reshape(feature, [-1, 28, 28, 1])

    # First conv layer will compute 32 features for each 5x5 patch
    with tf.variable_scope('conv_layer1'):
        h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)

    # Second conv layer will compute 64 features for each 5x5 patch.
    with tf.variable_scope('conv_layer2'):
        h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
                                     activation_fn=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
        # reshape tensor into a batch of vectors
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

    # Densely connected layer with 1024 neurons.
    h_fc1 = layers.dropout(
        layers.fully_connected(
            h_pool2_flat, 1024, activation_fn=tf.nn.relu),
        keep_prob=0.5,
        is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    # Create a tensor for training op.
    train_op = layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
        learning_rate=0.001)
    return tf.argmax(logits, 1), loss, train_op
Example #16
Source File: composable_model.py From deep_image_model with Apache License 2.0 | 5 votes |
def __init__(self,
             num_label_columns,
             hidden_units,
             optimizer=None,
             activation_fn=nn.relu,
             dropout=None,
             gradient_clip_norm=None,
             num_ps_replicas=0,
             scope=None):
    """Initializes DNNComposableModel objects.

    Args:
        num_label_columns: The number of label columns.
        hidden_units: List of hidden units per layer. All layers are fully
            connected.
        optimizer: An instance of `tf.Optimizer` used to apply gradients to
            the model. If `None`, will use a FTRL optimizer.
        activation_fn: Activation function applied to each layer. If `None`,
            will use `tf.nn.relu`.
        dropout: When not None, the probability we will drop out a given
            coordinate.
        gradient_clip_norm: A float > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            tf.clip_by_global_norm for more details.
        num_ps_replicas: The number of parameter server replicas.
        scope: Optional scope for variables created in this model. If not scope
            is supplied, one is generated.
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Example #17
Source File: extracting_weights.py From Hands-On-Deep-Learning-with-TensorFlow with MIT License | 5 votes |
def conv_learn(X, y, mode):
    # Ensure our images are 2d
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                                  kernel_size=[5, 5],
                                  activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='VALID')

        # Need to flatten conv output for use in dense layer
        p1_size = np.product(
            [s.value for s in p1.get_shape()[1:]])
        p1f = tf.reshape(p1, [-1, p1_size])

        # densely connected layer with 32 neurons and dropout
        h_fc1 = layers.fully_connected(p1f,
                                       5,
                                       activation_fn=tf.nn.relu)
        drop = layers.dropout(h_fc1, keep_prob=0.5,
                              is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)

    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op

# Use generic estimator with our function
Example #18
Source File: cnn.py From Hands-On-Deep-Learning-with-TensorFlow with MIT License | 5 votes |
def conv_learn(X, y, mode):
    # Ensure our images are 2d
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                                  kernel_size=[5, 5],
                                  activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='VALID')

        # Need to flatten conv output for use in dense layer
        p1_size = np.product(
            [s.value for s in p1.get_shape()[1:]])
        p1f = tf.reshape(p1, [-1, p1_size])

        # densely connected layer with 32 neurons and dropout
        h_fc1 = layers.fully_connected(p1f,
                                       5,
                                       activation_fn=tf.nn.relu)
        drop = layers.dropout(h_fc1, keep_prob=0.5,
                              is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)

    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op

# Use generic estimator with our function
Example #19
Source File: agent.py From exposure with MIT License | 5 votes |
def feature_extractor(net, output_dim, cfg):
    net = net - 0.5
    min_feature_map_size = 4
    assert output_dim % (
        min_feature_map_size**2) == 0, 'output dim=%d' % output_dim
    size = int(net.get_shape()[2])
    print('Agent CNN:')
    channels = cfg.base_channels
    print('    ', str(net.get_shape()))
    size /= 2
    net = ly.conv2d(
        net,
        num_outputs=channels,
        kernel_size=4,
        stride=2,
        activation_fn=lrelu)
    print('    ', str(net.get_shape()))
    while size > min_feature_map_size:
        if size == min_feature_map_size * 2:
            channels = output_dim / (min_feature_map_size**2)
        else:
            channels *= 2
        assert size % 2 == 0
        size /= 2
        net = ly.conv2d(
            net,
            num_outputs=channels,
            kernel_size=4,
            stride=2,
            activation_fn=lrelu)
        print('    ', str(net.get_shape()))
    print('before fc: ', net.get_shape()[1])
    net = tf.reshape(net, [-1, output_dim])
    net = tf.nn.dropout(net, cfg.dropout_keep_prob)
    return net

# Output: float \in [0, 1]
Example #20
Source File: composable_model.py From keras-lambda with MIT License | 4 votes |
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
            self._scope + "/input_from_feature_columns",
            values=features.values(),
            partitioner=input_layer_partitioner) as scope:
        net = layers.input_from_feature_columns(
            features,
            self._get_feature_columns(),
            weight_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
        with variable_scope.variable_scope(
                self._scope + "/hiddenlayer_%d" % layer_id,
                values=[net],
                partitioner=hidden_layer_partitioner) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._activation_fn,
                variables_collections=[self._scope],
                trainable=self._trainable,
                scope=scope)
            if self._dropout is not None and is_training:
                net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
        self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
            self._scope + "/logits",
            values=[net],
            partitioner=hidden_layer_partitioner) as scope:
        logits = layers.fully_connected(
            net,
            self._num_label_columns,
            activation_fn=None,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Example #21
Source File: HAN_model.py From hierarchical-attention-networks with MIT License | 4 votes |
def _init_body(self, scope):
    with tf.variable_scope(scope):

        word_level_inputs = tf.reshape(self.inputs_embedded, [
            self.document_size * self.sentence_size,
            self.word_size, self.embedding_size
        ])
        word_level_lengths = tf.reshape(
            self.word_lengths, [self.document_size * self.sentence_size])

        with tf.variable_scope('word') as scope:
            word_encoder_output, _ = bidirectional_rnn(
                self.word_cell, self.word_cell,
                word_level_inputs, word_level_lengths,
                scope=scope)

            with tf.variable_scope('attention') as scope:
                word_level_output = task_specific_attention(
                    word_encoder_output,
                    self.word_output_size,
                    scope=scope)

            with tf.variable_scope('dropout'):
                word_level_output = layers.dropout(
                    word_level_output, keep_prob=self.dropout_keep_proba,
                    is_training=self.is_training,
                )

        # sentence_level
        sentence_inputs = tf.reshape(
            word_level_output, [self.document_size, self.sentence_size,
                                self.word_output_size])

        with tf.variable_scope('sentence') as scope:
            sentence_encoder_output, _ = bidirectional_rnn(
                self.sentence_cell, self.sentence_cell,
                sentence_inputs, self.sentence_lengths, scope=scope)

            with tf.variable_scope('attention') as scope:
                sentence_level_output = task_specific_attention(
                    sentence_encoder_output, self.sentence_output_size,
                    scope=scope)

            with tf.variable_scope('dropout'):
                sentence_level_output = layers.dropout(
                    sentence_level_output, keep_prob=self.dropout_keep_proba,
                    is_training=self.is_training,
                )

        with tf.variable_scope('classifier'):
            self.logits = layers.fully_connected(
                sentence_level_output, self.classes, activation_fn=None)

            self.prediction = tf.argmax(self.logits, axis=-1)
Example #22
Source File: composable_model.py From keras-lambda with MIT License | 4 votes |
def __init__(self,
             num_label_columns,
             hidden_units,
             optimizer=None,
             activation_fn=nn.relu,
             dropout=None,
             gradient_clip_norm=None,
             num_ps_replicas=0,
             scope=None,
             trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
        num_label_columns: The number of label columns.
        hidden_units: List of hidden units per layer. All layers are fully
            connected.
        optimizer: An instance of `tf.Optimizer` used to apply gradients to
            the model. If `None`, will use a FTRL optimizer.
        activation_fn: Activation function applied to each layer. If `None`,
            will use `tf.nn.relu`.
        dropout: When not None, the probability we will drop out a given
            coordinate.
        gradient_clip_norm: A float > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            tf.clip_by_global_norm for more details.
        num_ps_replicas: The number of parameter server replicas.
        scope: Optional scope for variables created in this model. If not scope
            is supplied, one is generated.
        trainable: True if this model contains variables that can be trained.
            False otherwise (in cases where the variables are used strictly
            for transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Example #23
Source File: ACNet.py From distributedRL_MAPF with MIT License | 4 votes |
def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size):
    w_init = layers.variance_scaling_initializer()

    conv1 = layers.conv2d(inputs=inputs, padding="SAME", num_outputs=RNN_SIZE//4, kernel_size=[3, 3],
                          stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    conv1a = layers.conv2d(inputs=conv1, padding="SAME", num_outputs=RNN_SIZE//4, kernel_size=[3, 3],
                           stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    conv1b = layers.conv2d(inputs=conv1a, padding="SAME", num_outputs=RNN_SIZE//4, kernel_size=[3, 3],
                           stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])
    conv2 = layers.conv2d(inputs=pool1, padding="SAME", num_outputs=RNN_SIZE//2, kernel_size=[3, 3],
                          stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    conv2a = layers.conv2d(inputs=conv2, padding="SAME", num_outputs=RNN_SIZE//2, kernel_size=[3, 3],
                           stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    conv2b = layers.conv2d(inputs=conv2a, padding="SAME", num_outputs=RNN_SIZE//2, kernel_size=[3, 3],
                           stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
    pool2 = layers.max_pool2d(inputs=conv2b, kernel_size=[2, 2])
    conv3 = layers.conv2d(inputs=pool2, padding="VALID", num_outputs=RNN_SIZE-GOAL_REPR_SIZE, kernel_size=[2, 2],
                          stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=None)

    flat = tf.nn.relu(layers.flatten(conv3))
    goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)
    hidden_input = tf.concat([flat, goal_layer], 1)
    h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)
    d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)
    h2 = layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)
    d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)
    self.h3 = tf.nn.relu(d2 + hidden_input)

    # Recurrent network for temporal dependencies
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
    c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
    h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
    state_init = [c_init, h_init]
    c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
    h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
    state_in = (c_in, h_in)
    rnn_in = tf.expand_dims(self.h3, [0])
    step_size = tf.shape(inputs)[:1]
    state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
    lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
        lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
        time_major=False)
    lstm_c, lstm_h = lstm_state
    state_out = (lstm_c[:1, :], lstm_h[:1, :])
    self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])

    policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,
                                          weights_initializer=normalized_columns_initializer(1./float(a_size)),
                                          biases_initializer=None, activation_fn=None)
    policy = tf.nn.softmax(policy_layer)
    policy_sig = tf.sigmoid(policy_layer)
    value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
                                   weights_initializer=normalized_columns_initializer(1.0),
                                   biases_initializer=None, activation_fn=None)
    blocking = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
                                      weights_initializer=normalized_columns_initializer(1.0),
                                      biases_initializer=None, activation_fn=tf.sigmoid)
    on_goal = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
                                     weights_initializer=normalized_columns_initializer(1.0),
                                     biases_initializer=None, activation_fn=tf.sigmoid)

    return policy, value, state_out, state_in, state_init, blocking, on_goal, policy_sig
Example #24
Source File: composable_model.py From deep_image_model with Apache License 2.0 | 4 votes |
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
            self._scope + "/input_from_feature_columns",
            values=features.values(),
            partitioner=input_layer_partitioner) as scope:
        net = layers.input_from_feature_columns(
            features,
            self._get_feature_columns(),
            weight_collections=[self._scope],
            scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
        with variable_scope.variable_scope(
                self._scope + "/hiddenlayer_%d" % layer_id,
                values=[net],
                partitioner=hidden_layer_partitioner) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._activation_fn,
                variables_collections=[self._scope],
                scope=scope)
            if self._dropout is not None and is_training:
                net = layers.dropout(
                    net, keep_prob=(1.0 - self._dropout))
        self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
            self._scope + "/logits",
            values=[net],
            partitioner=hidden_layer_partitioner) as scope:
        logits = layers.fully_connected(
            net,
            self._num_label_columns,
            activation_fn=None,
            variables_collections=[self._scope],
            scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Example #25
Source File: gait_nn.py From gait-recognition with BSD 3-Clause "New" or "Revised" License | 4 votes |
def get_network(self, input_tensor, is_training, reuse = False):
    net = input_tensor

    with tf.variable_scope('GaitNN', reuse = reuse):
        with slim.arg_scope(self.get_arg_scope(is_training)):
            with tf.variable_scope('DownSampling'):
                with tf.variable_scope('17x17'):
                    net = layers.convolution2d(net, num_outputs = 256, kernel_size = 1)
                    slim.repeat(net, 3, self.residual_block, ch = 256, ch_inner = 64)

                with tf.variable_scope('8x8'):
                    net = self.residual_block(net, ch = 512, ch_inner = 64, stride = 2)
                    slim.repeat(net, 2, self.residual_block, ch = 512, ch_inner = 128)

                with tf.variable_scope('4x4'):
                    net = self.residual_block(net, ch = 512, ch_inner = 128, stride = 2)
                    slim.repeat(net, 1, self.residual_block, ch = 512, ch_inner = 256)
                    net = layers.convolution2d(net, num_outputs = 256, kernel_size = 1)
                    net = layers.convolution2d(net, num_outputs = 256, kernel_size = 3)

            with tf.variable_scope('FullyConnected'):
                # net = tf.reduce_mean(net, [1, 2], name = 'GlobalPool')
                net = layers.flatten(net)
                net = layers.fully_connected(net, 512, activation_fn = None, normalizer_fn = None)

            with tf.variable_scope('Recurrent', initializer = tf.contrib.layers.xavier_initializer()):
                cell_type = {
                    'GRU': tf.nn.rnn_cell.GRUCell,
                    'LSTM': tf.nn.rnn_cell.LSTMCell
                }

                cell = cell_type[self.recurrent_unit](self.FEATURES)
                cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.rnn_layers, state_is_tuple = True)

                net = tf.expand_dims(net, 0)
                net, state = tf.nn.dynamic_rnn(cell, net, initial_state = cell.zero_state(1, dtype = tf.float32))
                net = tf.reshape(net, [-1, self.FEATURES])

            # Temporal Avg-Pooling
            gait_signature = tf.reduce_mean(net, 0)

            if is_training:
                net = tf.expand_dims(gait_signature, 0)
                net = layers.dropout(net, 0.7)

                with tf.variable_scope('Logits'):
                    net = layers.fully_connected(net, self.num_of_persons, activation_fn = None,
                                                 normalizer_fn = None)

            return net, gait_signature, state
Example #26
Source File: build_inception_v4.py From tensorflow-litterbox with Apache License 2.0 | 4 votes |
def _build_inception_v4(
        inputs,
        stack_counts=[4, 7, 3],
        dropout_keep_prob=0.8,
        num_classes=1000,
        is_training=True,
        scope=''):
    """Inception v4 from http://arxiv.org/abs/

    Args:
        inputs: a tensor of size [batch_size, height, width, channels].
        dropout_keep_prob: dropout keep_prob.
        num_classes: number of predicted classes.
        is_training: whether is training or not.
        scope: Optional scope for op_scope.

    Returns:
        a list containing 'logits' Tensors and a dict of Endpoints.
    """
    # endpoints will collect relevant activations for external use, for example, summaries or losses.
    endpoints = {}
    name_scope_net = tf.name_scope(scope, 'Inception_v4', [inputs])
    arg_scope_train = arg_scope([layers.batch_norm, layers.dropout], is_training=is_training)
    arg_scope_conv = arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], stride=1, padding='SAME')
    with name_scope_net, arg_scope_train, arg_scope_conv:

        net = _block_stem(inputs, endpoints)
        # 35 x 35 x 384

        with tf.variable_scope('Scale1'):
            net = _stack(net, endpoints, fn=_block_a, count=stack_counts[0], scope='BlockA')
            # 35 x 35 x 384

        with tf.variable_scope('Scale2'):
            net = _block_a_reduce(net, endpoints)
            # 17 x 17 x 1024
            net = _stack(net, endpoints, fn=_block_b, count=stack_counts[1], scope='BlockB')
            # 17 x 17 x 1024

        with tf.variable_scope('Scale3'):
            net = _block_b_reduce(net, endpoints)
            # 8 x 8 x 1536
            net = _stack(net, endpoints, fn=_block_c, count=stack_counts[2], scope='BlockC')
            # 8 x 8 x 1536

        logits = _block_output(net, endpoints, num_classes, dropout_keep_prob, scope='Output')

        endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
        return logits, endpoints
Example #27
Source File: composable_model.py From auto-alt-text-lambda-api with MIT License | 4 votes |
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
            self._scope + "/input_from_feature_columns",
            values=features.values(),
            partitioner=input_layer_partitioner) as scope:
        net = layers.input_from_feature_columns(
            features,
            self._get_feature_columns(),
            weight_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
        with variable_scope.variable_scope(
                self._scope + "/hiddenlayer_%d" % layer_id,
                values=[net],
                partitioner=hidden_layer_partitioner) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._activation_fn,
                variables_collections=[self._scope],
                trainable=self._trainable,
                scope=scope)
            if self._dropout is not None and is_training:
                net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
        self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
            self._scope + "/logits",
            values=[net],
            partitioner=hidden_layer_partitioner) as scope:
        logits = layers.fully_connected(
            net,
            self._num_label_columns,
            activation_fn=None,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Example #28
Source File: composable_model.py From auto-alt-text-lambda-api with MIT License | 4 votes |
def __init__(self,
             num_label_columns,
             hidden_units,
             optimizer=None,
             activation_fn=nn.relu,
             dropout=None,
             gradient_clip_norm=None,
             num_ps_replicas=0,
             scope=None,
             trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
        num_label_columns: The number of label columns.
        hidden_units: List of hidden units per layer. All layers are fully
            connected.
        optimizer: An instance of `tf.Optimizer` used to apply gradients to
            the model. If `None`, will use a FTRL optimizer.
        activation_fn: Activation function applied to each layer. If `None`,
            will use `tf.nn.relu`.
        dropout: When not None, the probability we will drop out a given
            coordinate.
        gradient_clip_norm: A float > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            tf.clip_by_global_norm for more details.
        num_ps_replicas: The number of parameter server replicas.
        scope: Optional scope for variables created in this model. If not scope
            is supplied, one is generated.
        trainable: True if this model contains variables that can be trained.
            False otherwise (in cases where the variables are used strictly
            for transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Example #29
Source File: composable_model.py From lambda-packs with MIT License | 4 votes |
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
            self._scope + "/input_from_feature_columns",
            values=features.values(),
            partitioner=input_layer_partitioner) as scope:
        net = layers.input_from_feature_columns(
            features,
            self._get_feature_columns(),
            weight_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
        with variable_scope.variable_scope(
                self._scope + "/hiddenlayer_%d" % layer_id,
                values=[net],
                partitioner=hidden_layer_partitioner) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._activation_fn,
                variables_collections=[self._scope],
                trainable=self._trainable,
                scope=scope)
            if self._dropout is not None and is_training:
                net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
        self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
            self._scope + "/logits",
            values=[net],
            partitioner=hidden_layer_partitioner) as scope:
        logits = layers.fully_connected(
            net,
            self._num_label_columns,
            activation_fn=None,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Example #30
Source File: composable_model.py From lambda-packs with MIT License | 4 votes |
def __init__(self,
             num_label_columns,
             hidden_units,
             optimizer=None,
             activation_fn=nn.relu,
             dropout=None,
             gradient_clip_norm=None,
             num_ps_replicas=0,
             scope=None,
             trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
        num_label_columns: The number of label columns.
        hidden_units: List of hidden units per layer. All layers are fully
            connected.
        optimizer: An instance of `tf.Optimizer` used to apply gradients to
            the model. If `None`, will use a FTRL optimizer.
        activation_fn: Activation function applied to each layer. If `None`,
            will use `tf.nn.relu`.
        dropout: When not None, the probability we will drop out a given
            coordinate.
        gradient_clip_norm: A float > 0. If provided, gradients are clipped
            to their global norm with this clipping ratio. See
            tf.clip_by_global_norm for more details.
        num_ps_replicas: The number of parameter server replicas.
        scope: Optional scope for variables created in this model. If not scope
            is supplied, one is generated.
        trainable: True if this model contains variables that can be trained.
            False otherwise (in cases where the variables are used strictly
            for transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout