Python tensorflow.contrib.layers.flatten() Examples
The following are 30 code examples of tensorflow.contrib.layers.flatten(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.layers, or try the search function.
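Before the examples, here is a minimal, self-contained sketch of the pattern most of these projects follow: flatten a convolutional feature map into a 2-D [batch, features] tensor before a fully connected layer. This is not taken from any example below; it assumes TensorFlow 1.x with tf.contrib available, and the placeholder shape and layer sizes are illustrative only.

    import tensorflow as tf
    from tensorflow.contrib import layers

    # NHWC image batch; the 84x84x4 shape is a common Atari-style frame stack,
    # chosen here purely for illustration.
    images = tf.placeholder(tf.float32, [None, 84, 84, 4], name='images')
    net = layers.convolution2d(images, num_outputs=32, kernel_size=8, stride=4,
                               activation_fn=tf.nn.relu)
    # flatten() collapses every dimension except the batch dimension:
    # [batch, h, w, c] -> [batch, h*w*c]
    net = layers.flatten(net)
    logits = layers.fully_connected(net, num_outputs=10, activation_fn=None)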
Example #1
Source File: model.py From NoisyNet-DQN with MIT License | 6 votes

def model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out
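A hedged usage sketch (not part of the original source file): how the model() above might be invoked to build Q-values for an Atari-style observation. The placeholder shape, action count, and scope name are illustrative assumptions.

    obs_ph = tf.placeholder(tf.float32, [None, 84, 84, 4], name='observation')  # hypothetical input
    q_values = model(obs_ph, num_actions=6, scope='q_func', noisy=False)  # shape [batch, num_actions]
    greedy_action = tf.argmax(q_values, axis=1)  # greedy policy over the Q-values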
Example #2
Source File: NetworkArchitecture.py From Deep-RL-agents with MIT License | 6 votes

def build_conv(self):
    self.inputs = tf.placeholder(tf.float32, [None, *self.state_size],
                                 name='Input_state')

    with tf.variable_scope('Convolutional_Layers'):
        self.conv1 = slim.conv2d(activation_fn=tf.nn.relu,
                                 inputs=self.inputs, num_outputs=16,
                                 kernel_size=[8, 8], stride=[4, 4],
                                 padding='VALID')
        self.conv2 = slim.conv2d(activation_fn=tf.nn.relu,
                                 inputs=self.conv1, num_outputs=32,
                                 kernel_size=[4, 4], stride=[2, 2],
                                 padding='VALID')

    # Flatten the output
    flat_conv2 = flatten(self.conv2)

    self.hidden = slim.fully_connected(flat_conv2, 256, activation_fn=tf.nn.relu)

    return self.inputs
Example #3
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 6 votes

def _build_aux_head(net, end_points, num_classes, hparams, scope):
    """Auxiliary head used for all models across all datasets."""
    with tf.variable_scope(scope):
        aux_logits = tf.identity(net)
        with tf.variable_scope('aux_logits'):
            aux_logits = slim.avg_pool2d(
                aux_logits, [5, 5], stride=3, padding='VALID')
            aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
            aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
            aux_logits = tf.nn.relu(aux_logits)
            # Shape of feature map before the final layer.
            shape = aux_logits.shape
            if hparams.data_format == 'NHWC':
                shape = shape[1:3]
            else:
                shape = shape[2:4]
            aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
            aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
            aux_logits = tf.nn.relu(aux_logits)
            aux_logits = contrib_layers.flatten(aux_logits)
            aux_logits = slim.fully_connected(aux_logits, num_classes)
            end_points['AuxLogits'] = aux_logits
Example #4
Source File: mediator.py From CoT with MIT License | 6 votes

def __call__(self, h):
    # sequence -> [b, l, v]
    _, l, v = h.get_shape().as_list()
    h = tf.reshape(h, [-1, l, 1, v])
    with tf.variable_scope("textmover", reuse=tf.AUTO_REUSE):
        h0 = layers.convolution2d(
            h, v, [4, 1], [2, 1],
            activation_fn=tf.nn.softplus
        )
        h1 = layers.convolution2d(
            h0, v, [4, 1], [1, 1],
            activation_fn=tf.nn.softplus
        )
        h2 = layers.convolution2d(
            h1, v, [4, 1], [2, 1],
            activation_fn=tf.nn.softplus
        )
        h = layers.flatten(h2)
        h = layers.fully_connected(
            h, 1,
            activation_fn=tf.identity
        )
    return h
Example #5
Source File: model.py From learning2run with MIT License | 6 votes

def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out
Example #6
Source File: models.py From DQN-using-PyTorch-and-ML-Agents with GNU General Public License v3.0 | 6 votes

def create_visual_observation_encoder(self, image_input, h_size, activation, num_layers, scope, reuse):
    """
    Builds a set of visual (CNN) encoders.
    :param reuse: Whether to re-use the weights within the same scope.
    :param scope: The scope of the graph within which to create the ops.
    :param image_input: The placeholder for the image input to use.
    :param h_size: Hidden layer size.
    :param activation: What type of activation function to use for layers.
    :param num_layers: number of hidden layers to create.
    :return: List of hidden layer tensors.
    """
    with tf.variable_scope(scope):
        conv1 = tf.layers.conv2d(image_input, 16, kernel_size=[8, 8], strides=[4, 4],
                                 activation=tf.nn.elu, reuse=reuse, name="conv_1")
        conv2 = tf.layers.conv2d(conv1, 32, kernel_size=[4, 4], strides=[2, 2],
                                 activation=tf.nn.elu, reuse=reuse, name="conv_2")
        hidden = c_layers.flatten(conv2)

    with tf.variable_scope(scope + '/' + 'flat_encoding'):
        hidden_flat = self.create_continuous_observation_encoder(hidden, h_size, activation,
                                                                 num_layers, scope, reuse)
    return hidden_flat
Example #7
Source File: models.py From DRL_DeliveryDuel with MIT License | 6 votes

def create_visual_encoder(self, h_size, activation, num_layers):
    """
    Builds a set of visual (CNN) encoders.
    :param h_size: Hidden layer size.
    :param activation: What type of activation function to use for layers.
    :param num_layers: number of hidden layers to create.
    :return: List of hidden layer tensors.
    """
    conv1 = tf.layers.conv2d(self.visual_in[-1], 16, kernel_size=[8, 8], strides=[4, 4],
                             activation=tf.nn.elu)
    conv2 = tf.layers.conv2d(conv1, 32, kernel_size=[4, 4], strides=[2, 2],
                             activation=tf.nn.elu)
    hidden = c_layers.flatten(conv2)

    for j in range(num_layers):
        hidden = tf.layers.dense(hidden, h_size, use_bias=False, activation=activation)
    return hidden
Example #8
Source File: utils.py From TensorFlow-VAE-GAN-DRAW with Apache License 2.0 | 6 votes

def encoder(input_tensor, output_size):
    '''Create encoder network.

    Args:
        input_tensor: a batch of flattened images [batch_size, 28*28]

    Returns:
        A tensor that expresses the encoder network
    '''
    net = tf.reshape(input_tensor, [-1, 28, 28, 1])
    net = layers.conv2d(net, 32, 5, stride=2)
    net = layers.conv2d(net, 64, 5, stride=2)
    net = layers.conv2d(net, 128, 5, stride=2, padding='VALID')
    net = layers.dropout(net, keep_prob=0.9)
    net = layers.flatten(net)
    return layers.fully_connected(net, output_size, activation_fn=None)
Example #9
Source File: utils.py From TensorFlow-VAE-GAN-DRAW with Apache License 2.0 | 6 votes

def decoder(input_tensor):
    '''Create decoder network.

    If input tensor is provided then decodes it, otherwise samples from a sampled vector.

    Args:
        input_tensor: a batch of vectors to decode

    Returns:
        A tensor that expresses the decoder network
    '''
    net = tf.expand_dims(input_tensor, 1)
    net = tf.expand_dims(net, 1)
    net = layers.conv2d_transpose(net, 128, 3, padding='VALID')
    net = layers.conv2d_transpose(net, 64, 5, padding='VALID')
    net = layers.conv2d_transpose(net, 32, 5, stride=2)
    net = layers.conv2d_transpose(
        net, 1, 5, stride=2, activation_fn=tf.nn.sigmoid)
    net = layers.flatten(net)
    return net
Example #10
Source File: model.py From rl-attack-detection with MIT License | 6 votes

def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
Example #11
Source File: model.py From rl-attack-detection with MIT License | 6 votes

def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
            if concat_softmax:
                out = tf.nn.softmax(out)
        return out
Example #12
Source File: build_vgg.py From tensorflow-litterbox with Apache License 2.0 | 6 votes

def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')

        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')

        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')

        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')

        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Example #13
Source File: network.py From ppo with MIT License | 5 votes

def cnn_network(convs, fcs, use_lstm, padding, inpt, masks, rnn_state,
                num_actions, lstm_unit, nenvs, step_size, scope):
    out = make_cnn(convs, padding, inpt)
    out = layers.flatten(out)
    out = make_fcs(fcs, out)
    rnn_out, rnn_state = make_lstm(
        lstm_unit, nenvs, step_size, out, masks, rnn_state)

    if use_lstm:
        out = rnn_out

    policy = layers.fully_connected(
        out, num_actions, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(0.1))
    dist = tf.distributions.Categorical(probs=tf.nn.softmax(policy))

    value = layers.fully_connected(
        out, 1, activation_fn=None,
        weights_initializer=tf.orthogonal_initializer(1.0))

    return dist, value, rnn_state
Example #14
Source File: models.py From rl-attack with MIT License | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Example #15
Source File: model.py From rl-attack with MIT License | 5 votes

def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False, concat_softmax=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2', size=1)
            else:
                state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2', size=num_actions)
            else:
                actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
Example #16
Source File: fully_conv.py From pysc2-rl-agents with MIT License | 5 votes

def spatial_output(self, x):
    logits = layers.conv2d(x, 1, kernel_size=1, stride=1, activation_fn=None,
                           data_format=self.data_format)
    logits = layers.flatten(self.to_nhwc(logits))
    return tf.nn.softmax(logits)
Example #17
Source File: SE_ResNeXt.py From SENet-Tensorflow with MIT License | 5 votes

def Build_SEnet(self, input_x):
    # only cifar10 architecture
    input_x = self.first_layer(input_x, scope='first_layer')

    x = self.residual_layer(input_x, out_dim=64, layer_num='1')
    x = self.residual_layer(x, out_dim=128, layer_num='2')
    x = self.residual_layer(x, out_dim=256, layer_num='3')

    x = Global_Average_Pooling(x)
    x = flatten(x)

    x = Fully_connected(x, layer_name='final_fully_connected')
    return x
Example #18
Source File: SE_Inception_v4.py From SENet-Tensorflow with MIT License | 5 votes

def Build_SEnet(self, input_x):
    input_x = tf.pad(input_x, [[0, 0], [32, 32], [32, 32], [0, 0]])  # size 32 -> 96
    # only cifar10 architecture
    x = self.Stem(input_x, scope='stem')

    for i in range(4):
        x = self.Inception_A(x, scope='Inception_A' + str(i))
        channel = int(np.shape(x)[-1])
        x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_A' + str(i))

    x = self.Reduction_A(x, scope='Reduction_A')

    for i in range(7):
        x = self.Inception_B(x, scope='Inception_B' + str(i))
        channel = int(np.shape(x)[-1])
        x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_B' + str(i))

    x = self.Reduction_B(x, scope='Reduction_B')

    for i in range(3):
        x = self.Inception_C(x, scope='Inception_C' + str(i))
        channel = int(np.shape(x)[-1])
        x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_C' + str(i))

    x = Global_Average_Pooling(x)
    x = Dropout(x, rate=0.2, training=self.training)
    x = flatten(x)

    x = Fully_connected(x, layer_name='final_fully_connected')
    return x
Example #19
Source File: models.py From sonic_contest with MIT License | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Example #20
Source File: model.py From rl-attack with MIT License | 5 votes

def fprop(self, img_in, **kwargs):
    del kwargs
    # def model(img_in, num_actions, scope, noisy=False, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(self.scope, reuse=self.reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            if self.noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=self.num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=self.num_actions, activation_fn=None)
        # V: Softmax - inspired by deep-rl-attack #
        # if concat_softmax:
        #     prob = tf.nn.softmax(out)
        #     return out
        return {self.O_LOGITS: out, self.O_PROBS: tf.nn.softmax(logits=out)}
Example #21
Source File: NetworkArchitecture.py From Deep-RL-agents with MIT License | 5 votes

def build_model(self):
    self.inputs = tf.placeholder(tf.float32, [None, *self.state_size],
                                 name='Input_state')

    if parameters.CONV:
        with tf.variable_scope('Convolutional_Layers'):
            self.conv1 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.inputs, num_outputs=32,
                                     kernel_size=[8, 8], stride=[4, 4],
                                     padding='VALID')
            self.conv2 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.conv1, num_outputs=64,
                                     kernel_size=[4, 4], stride=[2, 2],
                                     padding='VALID')
            self.conv3 = slim.conv2d(activation_fn=tf.nn.relu,
                                     inputs=self.conv2, num_outputs=64,
                                     kernel_size=[3, 3], stride=[1, 1],
                                     padding='VALID')

        # Flatten the output
        self.hidden = flatten(self.conv3)

    else:
        self.hidden = slim.fully_connected(self.inputs, 64, activation_fn=tf.nn.relu)

    return self.inputs
Example #22
Source File: models.py From qmap with MIT License | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        inpt = tf.cast(inpt, tf.float32)
        inpt = tf.div(inpt, 255.)
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Example #23
Source File: NetworkArchitecture.py From Deep-RL-agents with MIT License | 5 votes

def build_model(self):
    self.inputs = tf.placeholder(tf.float32, [None, *self.state_size],
                                 name='Input_state')

    if parameters.CONV:
        with tf.variable_scope('Convolutional_Layers'):
            self.conv1 = slim.conv2d(activation_fn=tf.nn.elu,
                                     inputs=self.inputs, num_outputs=32,
                                     kernel_size=[8, 8], stride=[4, 4],
                                     padding='VALID')
            self.conv2 = slim.conv2d(activation_fn=tf.nn.elu,
                                     inputs=self.conv1, num_outputs=64,
                                     kernel_size=[4, 4], stride=[2, 2],
                                     padding='VALID')
            self.conv3 = slim.conv2d(activation_fn=tf.nn.elu,
                                     inputs=self.conv2, num_outputs=64,
                                     kernel_size=[3, 3], stride=[1, 1],
                                     padding='VALID')

        # Flatten the output
        self.hidden = flatten(self.conv3)

    else:
        self.hidden = slim.fully_connected(self.inputs, 64, activation_fn=tf.nn.elu)

    return self.inputs
Example #24
Source File: models.py From baselines with MIT License | 5 votes

def build_q_func(network, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out, center=True, scale=True)
                    action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    for hidden in hiddens:
                        state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                        if layer_norm:
                            state_out = layers.layer_norm(state_out, center=True, scale=True)
                        state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            return q_out

    return q_func_builder
Example #25
Source File: models.py From rl-attack-detection with MIT License | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
        return out
Example #26
Source File: test_tf_graph.py From hiddenlayer with MIT License | 5 votes

def __init__(self):
    self.graph = tf.Graph()
    with self.graph.as_default():
        # Global step counter
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        # Placeholders
        self.images = tf.placeholder(tf.float32, [None, 32, 32, 3], name="images")
        self.labels = tf.placeholder(tf.int32, [None], name="labels")
        # Layers
        self.conv1 = self.conv(self.images, 8)
        self.pool1 = self.pool(self.conv1)
        self.conv2 = self.conv(self.pool1, 12)
        self.pool2 = self.pool(self.conv2)
        self.conv3 = self.conv(self.pool2, 16)
        self.pool3 = self.pool(self.conv3)
        self.flat = layers.flatten(self.pool3)
        # TODO
        self.h1 = layers.fully_connected(self.flat, 200, lrelu)
        self.logits = layers.fully_connected(self.flat, 62, lrelu)
        # Convert one-hot vector to label index (int).
        self.predicted_labels = tf.argmax(self.logits, 1)
        # Loss
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.labels, name="test_name"))
        # Training Ops
        self.train = tf.train.AdamOptimizer(learning_rate=0.001)\
            .minimize(self.loss, global_step=self.global_step)
        self.init = tf.global_variables_initializer()
        # Create session
        self.session = tf.Session()
        # Run initialization op
        self.session.run(self.init)
Example #27
Source File: model.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes

def model(img_in, num_actions, scope, noisy=False, reuse=False, concat_softmax=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
            # V: Softmax - inspired by deep-rl-attack #
            if concat_softmax:
                out = tf.nn.softmax(out)
        return out
Example #28
Source File: model.py From NoisyNet-DQN with MIT License | 5 votes

def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2', size=1)
            else:
                state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2', size=num_actions)
            else:
                actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
Example #29
Source File: models.py From self-imitation-learning with MIT License | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Example #30
Source File: models.py From mario-rl-tutorial with Apache License 2.0 | 5 votes

def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out