Python tensorflow.contrib.layers.fully_connected() Examples

The following are 30 code examples of tensorflow.contrib.layers.fully_connected(), collected from open-source projects. The source file and originating project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.contrib.layers.
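Before the project examples, a minimal self-contained sketch of the call itself may help (the shapes and sizes here are illustrative assumptions, not taken from any example below). fully_connected adds a dense layer and applies tf.nn.relu by default unless activation_fn is overridden:

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, shape=[None, 128])   # [batch, features]
hidden = layers.fully_connected(inputs, num_outputs=64)  # ReLU by default
logits = layers.fully_connected(hidden, num_outputs=10,
                                activation_fn=None)      # linear output layer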
Example #1
Source File: hard_decisions_to_data_then_nn.py    From auto-alt-text-lambda-api with MIT License
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    super(HardDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)

    self.layers = [decisions_to_data.HardDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)] 
Example #2
Source File: es.py    From rl_algorithms with MIT License
def _make_network(self, data_in, out_dim):
        """ Build the network with the same architecture following OpenAI's paper.

        Returns the final *layer* of the network, which corresponds to our
        chosen action.  There is no non-linearity for the last layer because
        different envs have different action ranges.
        """
        with tf.variable_scope("ESAgent", reuse=False):
            out = data_in
            out = layers.fully_connected(out, num_outputs=64,
                    weights_initializer=layers.xavier_initializer(uniform=True),
                    # weights_initializer=utils.normc_initializer(0.5),
                    activation_fn=tf.nn.tanh)
            out = layers.fully_connected(out, num_outputs=64,
                    weights_initializer=layers.xavier_initializer(uniform=True),
                    # weights_initializer=utils.normc_initializer(0.5),
                    activation_fn=tf.nn.tanh)
            out = layers.fully_connected(out, num_outputs=out_dim,
                    weights_initializer=layers.xavier_initializer(uniform=True),
                    # weights_initializer=utils.normc_initializer(0.5),
                    activation_fn=None)
            return out 
Example #3
Source File: ddpg.py    From rl_algorithms with MIT License
def _build_net(self, input_BO, scope):
        """ The Actor network.
        
        Uses ReLUs for all hidden layers, but a tanh to the output to bound the
        action. This follows their 'low-dimensional networks' using 400 and 300
        units for the hidden layers. Set `reuse=False`. I don't use batch
        normalization or their precise weight initialization.
        """
        with tf.variable_scope(scope, reuse=False):
            hidden1 = layers.fully_connected(input_BO,
                    num_outputs=400,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            hidden2 = layers.fully_connected(hidden1, 
                    num_outputs=300,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            actions_BA = layers.fully_connected(hidden2,
                    num_outputs=self.ac_dim,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.tanh) # Note the tanh!
            # This should broadcast, but haven't tested with ac_dim > 1.
            actions_BA = tf.multiply(actions_BA, self.ac_high)
            return actions_BA 
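The broadcasting noted in the comment above can be checked in isolation; a quick sketch (the batch size, action dimension, and bound values are assumptions):

import tensorflow as tf

actions_BA = tf.ones([4, 3])                # batch B=4, action dim A=3
ac_high = tf.constant([1.0, 2.0, 0.5])      # per-dimension action bounds
scaled = tf.multiply(actions_BA, ac_high)   # the [A] vector broadcasts over B rows
with tf.Session() as sess:
    print(sess.run(scaled))                 # every row is [1.0, 2.0, 0.5]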
Example #4
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def fully_connected(self, *args, **kwargs):
    """Masks NUM_OUTPUTS from the function pointed to by 'fully_connected'.

    The object's parameterization has precedence over the given NUM_OUTPUTS
    argument. The resolution of the op names uses
    tf.contrib.framework.get_name_scope() and kwargs['scope'].

    Args:
      *args: Arguments for the operation.
      **kwargs: Key arguments for the operation.

    Returns:
      The result of the application of the function_map['fully_connected'] to
      the given 'inputs', '*args' and '**kwargs' while possibly overriding
      NUM_OUTPUTS according to the parameterization.

    Raises:
      ValueError: If kwargs does not contain a key named 'scope'.
    """
    inputs = _get_from_args_or_kwargs('inputs', 0, args, kwargs)
    if inputs.shape.ndims != 2:
      raise ValueError(
          'ConfigurableOps does not support fully_connected with rank != 2')
    fn, suffix = self._get_function_and_suffix('fully_connected')
    return self._mask(fn, suffix, *args, **kwargs) 
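Because the decorator above rejects inputs whose rank is not 2, higher-rank features need flattening before the call; a minimal sketch with plain contrib layers (the shapes are assumptions):

import tensorflow as tf
from tensorflow.contrib import layers

feature_map = tf.ones([8, 4, 4, 16])   # rank-4 [batch, height, width, channels]
flat = layers.flatten(feature_map)     # rank-2 [batch, 256]
out = layers.fully_connected(flat, num_outputs=10, scope='fc')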
Example #5
Source File: variational_dropout.py    From zhusuan with MIT License
def var_dropout(x, n, net_size, n_particles, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': None}
    bn = zs.BayesianNet()
    h = x
    for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
        eps_mean = tf.ones([n, n_in])
        eps = bn.normal(
            'layer' + str(i) + '/eps', eps_mean, std=1.,
            n_samples=n_particles, group_ndims=1)
        h = layers.fully_connected(
            h * eps, n_out, normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        if i < len(net_size) - 2:
            h = tf.nn.relu(h)
    y = bn.categorical('y', h)
    bn.deterministic('y_logit', h)
    return bn 
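The zip(net_size[:-1], net_size[1:]) idiom above pairs consecutive layer sizes into (n_in, n_out) tuples; a quick illustration (the sizes are assumptions):

net_size = [784, 512, 512, 10]
print(list(zip(net_size[:-1], net_size[1:])))
# [(784, 512), (512, 512), (512, 10)]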
Example #6
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def BuildModel(self, resnet_fn, block_fn):
    # We use this model as a test case because the slim.nets.resnet module is
    # used in some production.
    #
    # The model looks as follows:
    #
    # Image --> unit_1/shortcut
    # Image --> unit_1/conv1 --> unit_1/conv2 --> unit_1/conv3
    #
    # unit_1/shortcut + unit_1/conv3 --> unit_1 (residual connection)
    #
    # unit_1 --> unit_2/conv1  -> unit_2/conv2 --> unit_2/conv3
    #
    # unit_1 + unit_2/conv3 --> unit_2 (residual connection)
    #
    # In between, there are strided convolutions and pooling ops, but these
    # should not affect the regularizer.
    blocks = [
        block_fn('block1', base_depth=7, num_units=2, stride=2),
    ]
    image = tf.constant(0.0, shape=[1, 2, 2, NUM_CHANNELS])
    net = resnet_fn(
        image, blocks, include_root_block=False, is_training=False)[0]
    net = tf.reduce_mean(net, axis=(1, 2))
    return slim.layers.fully_connected(net, 23, scope='FC') 
Example #7
Source File: filters.py    From exposure with MIT License
def extract_parameters(self, features):
    output_dim = (self.get_num_filter_parameters() +
                  self.get_num_mask_parameters())
    features = ly.fully_connected(
        features,
        self.cfg.fc1_size,
        scope='fc1',
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    features = ly.fully_connected(
        features,
        output_dim,
        scope='fc2',
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    return features[:, :self.get_num_filter_parameters()], \
           features[:, self.get_num_filter_parameters():]

  # Should be implemented in child classes 
Example #8
Source File: model.py    From learning2run with MIT License
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            value_out = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                value_out = layer_norm_fn(value_out, relu=True)
            else:
                value_out = tf.nn.relu(value_out)
            value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
        return value_out 
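layer_norm_fn is not defined in this excerpt; a plausible definition consistent with how it is called above (an assumption, not necessarily the project's code):

def layer_norm_fn(x, relu=True):
    # Layer-normalize, then optionally apply a ReLU (assumed helper).
    x = layers.layer_norm(x, center=True, scale=True)
    if relu:
        x = tf.nn.relu(x)
    return x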
Example #9
Source File: hard_decisions_to_data_then_nn.py    From lambda-packs with MIT License
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    super(HardDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)

    self.layers = [decisions_to_data.HardDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)] 
Example #10
Source File: swem_utils.py    From BERT with Apache License 2.0
def discriminator_res(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis_0 = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.embed_size,
                                   biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    H_dis_0n = tf.nn.relu(H_dis_0)                               
    H_dis_1 = layers.fully_connected(tf.nn.dropout(H_dis_0n, keep_prob=dropout), num_outputs=opt.embed_size,
                                   biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_2',
                                   reuse=is_reuse)
    H_dis_1n = tf.nn.relu(H_dis_1) + H_dis_0
    H_dis_2 = layers.fully_connected(tf.nn.dropout(H_dis_1n, keep_prob=dropout), num_outputs=opt.embed_size,
                                   biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_3',
                                   reuse=is_reuse)
    H_dis_2n = tf.nn.relu(H_dis_2) + H_dis_1
    H_dis_3 = layers.fully_connected(tf.nn.dropout(H_dis_2n, keep_prob=dropout), num_outputs=opt.embed_size,
                                   biases_initializer=biasInit, activation_fn=None, scope=prefix + 'dis_4',
                                   reuse=is_reuse)

    logits = layers.linear(tf.nn.dropout(H_dis_3, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_10', reuse=is_reuse)
    return logits 
Example #11
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def model(img_in, num_actions, scope, noisy=False, reuse=False,
          concat_softmax=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512,
                                  activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512,
                                             activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions,
                                             activation_fn=None)
            # V: Softmax - inspired by deep-rl-attack #
            if concat_softmax:
                out = tf.nn.softmax(out)
        return out 
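noisy_dense is likewise not defined in this excerpt; a minimal NoisyNet-style sketch consistent with the call sites above (independent Gaussian noise; everything here is an assumption, not the project's implementation):

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

def noisy_dense(x, name, size, activation_fn=None):
    # Dense layer whose weights and biases receive learnable Gaussian noise
    # (https://arxiv.org/abs/1706.10295); a sketch, not the original code.
    with tf.variable_scope(name):
        in_dim = int(x.shape[-1])
        sigma0 = 0.5 / np.sqrt(in_dim)
        w_mu = tf.get_variable('w_mu', [in_dim, size],
                               initializer=layers.xavier_initializer())
        w_sigma = tf.get_variable('w_sigma', [in_dim, size],
                                  initializer=tf.constant_initializer(sigma0))
        b_mu = tf.get_variable('b_mu', [size],
                               initializer=tf.zeros_initializer())
        b_sigma = tf.get_variable('b_sigma', [size],
                                  initializer=tf.constant_initializer(sigma0))
        w = w_mu + w_sigma * tf.random_normal([in_dim, size])
        b = b_mu + b_sigma * tf.random_normal([size])
        out = tf.matmul(x, w) + b
        return activation_fn(out) if activation_fn is not None else out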
Example #12
Source File: leam_utils.py    From BERT with Apache License 2.0
def discriminator_2layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    print(num_outputs, "===num outputs===")
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_2', reuse=is_reuse)
    return logits 
Example #13
Source File: models.py    From ICML2019-TREX with MIT License
def build_q_func(network, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out, center=True, scale=True)
                    action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    for hidden in hiddens:
                        state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                        if layer_norm:
                            state_out = layers.layer_norm(state_out, center=True, scale=True)
                        state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            return q_out

    return q_func_builder 
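A hypothetical use of build_q_func above (the network name, hidden sizes, and observation shape are assumptions): it returns a builder, which is then applied to an observation placeholder.

q_func = build_q_func('mlp', hiddens=[64], dueling=True)
obs_ph = tf.placeholder(tf.float32, [None, 4])            # e.g. CartPole observations
q_values = q_func(obs_ph, num_actions=2, scope='deepq')   # -> [None, 2] Q-values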
Example #14
Source File: custom_cartpole.py    From DRL_DeliveryDuel with MIT License
def model(inpt, num_actions, scope, reuse=False):
    """This model takes as input an observation and returns values of all actions."""
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        out = layers.fully_connected(out, num_outputs=64, activation_fn=tf.nn.tanh)
        out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return out 
Example #15
Source File: models.py    From DRL_DeliveryDuel with MIT License
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out 
Example #16
Source File: models.py    From DRL_DeliveryDuel with MIT License
def _mlp(hiddens, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        for hidden in hiddens:
            out = layers.fully_connected(out, num_outputs=hidden, activation_fn=None)
            if layer_norm:
                out = layers.layer_norm(out, center=True, scale=True)
            out = tf.nn.relu(out)
        q_out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return q_out 
Example #17
Source File: models.py    From ape-x with Apache License 2.0
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False, data_format=None):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu,
                                           data_format=data_format)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out 
Example #18
Source File: models.py    From ICML2019-TREX with MIT License
def _mlp(hiddens, input_, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = input_
        for hidden in hiddens:
            out = layers.fully_connected(out, num_outputs=hidden, activation_fn=None)
            if layer_norm:
                out = layers.layer_norm(out, center=True, scale=True)
            out = tf.nn.relu(out)
        q_out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
        return q_out 
Example #19
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testDefaultScopes_Dense(
      self, dense_fn, num_outputs_kwarg, expected_op_scope):
    inputs = tf.ones([1, 2])
    parameterization = {
        '{}/MatMul'.format(expected_op_scope): 5
    }
    decorator = ops.ConfigurableOps(
        parameterization=parameterization,
        function_dict={'fully_connected': dense_fn})
    _ = decorator.fully_connected(inputs, **{num_outputs_kwarg: 8})
    self.assertDictEqual(parameterization, decorator.constructed_ops) 
Example #20
Source File: hard_decisions_to_data_then_nn.py    From auto-alt-text-lambda-api with MIT License
def _base_inference(self, data, data_spec=None, soft=False):
    if soft:
      inference_result = self.layers[0].soft_inference_graph(data)
    else:
      inference_result = self._do_layer_inference(self.layers[0], data)

    for layer in self.layers[1:]:
      inference_result = self._do_layer_inference(layer, inference_result)

    output_size = 1 if self.is_regression else self.params.num_classes
    output = layers.fully_connected(
        inference_result, output_size, activation_fn=nn_ops.softmax)
    return output 
Example #21
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testFullyConnectedOpInputArgs(self):
    decorator = ops.ConfigurableOps(parameterization={'test/MatMul': 14})
    output = decorator.fully_connected(
        self.fc_inputs, num_outputs=87, scope='test')
    self.assertEqual(14, output.shape.as_list()[-1]) 
Example #22
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testFullyConnectedOpAllKwargs(self):
    decorator = ops.ConfigurableOps(parameterization={'test/MatMul': 13})
    output = decorator.fully_connected(
        inputs=self.fc_inputs, num_outputs=88, scope='test')
    self.assertEqual(13, output.shape.as_list()[-1]) 
Example #23
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testMapBinding(self):
    # TODO(e1): Clean up this file/test. Split to different tests
    function_dict = {
        'fully_connected': mock_fully_connected,
        'conv2d': mock_conv2d,
        'separable_conv2d': mock_separable_conv2d,
        'concat': mock_concat,
        'add_n': mock_add_n,
    }
    parameterization = {
        'fc/MatMul': 13,
        'conv/Conv2D': 15,
        'sep/separable_conv2d': 17
    }
    num_outputs = lambda res: res['args'][1]
    decorator = ops.ConfigurableOps(
        parameterization=parameterization, function_dict=function_dict)
    fc = decorator.fully_connected(self.fc_inputs, num_outputs=88, scope='fc')
    self.assertEqual('myfully_connected', fc['mock_name'])
    self.assertEqual(parameterization['fc/MatMul'], num_outputs(fc))

    conv2d = decorator.conv2d(
        self.inputs, num_outputs=11, kernel_size=3, scope='conv')
    self.assertEqual('myconv2d', conv2d['mock_name'])
    self.assertEqual(parameterization['conv/Conv2D'], num_outputs(conv2d))

    separable_conv2d = decorator.separable_conv2d(
        self.inputs, num_outputs=88, kernel_size=3, scope='sep')
    self.assertEqual('myseparable_conv2d', separable_conv2d['mock_name'])
    self.assertEqual(parameterization['sep/separable_conv2d'],
                     num_outputs(separable_conv2d))

    concat = decorator.concat(axis=1, values=[1, None, 2])
    self.assertEqual(concat['args'][0], [1, 2])
    self.assertEqual(concat['kwargs']['axis'], 1)
    with self.assertRaises(ValueError):
      _ = decorator.concat(inputs=[1, None, 2])

    add_n = decorator.add_n(name='add_n', inputs=[1, None, 2])
    self.assertEqual(add_n['args'][0], [1, 2]) 
Example #24
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def constructed_ops(self):
    """Returns a dictionary between op names built to their NUM_OUTPUTS.

       The dictionary will contain an op.name: NUM_OUTPUTS pair for each op
       constructed by the decorator. The dictionary is ordered according to the
       order items were added.
       The parameterization is accumulated during all the calls to the object's
       members, such as `conv2d`, `fully_connected` and `separable_conv2d`.
       The values used are either the values from the parameterization set for
       the object, or the values that where passed to the members.
    """
    return self._constructed_ops 
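A hypothetical reading of constructed_ops after a single decorated call (the names and sizes are assumptions): the recorded value is the parameterized one, not the requested num_outputs.

decorator = ops.ConfigurableOps(parameterization={'fc1/MatMul': 5})
net = decorator.fully_connected(tf.ones([1, 4]), num_outputs=8, scope='fc1')
print(decorator.constructed_ops)   # e.g. {'fc1/MatMul': 5}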
Example #25
Source File: fully_connected.py    From auto-alt-text-lambda-api with MIT License
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, self.params.layer_size)

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations = layers.fully_connected(nn_activations,
                                                self.params.layer_size)
      return nn_activations 
Example #26
Source File: fully_connected.py    From auto-alt-text-lambda-api with MIT License
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, 1)

      # There is always one activation per instance by definition, so squeeze
      # away the extra dimension.
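      # (`squeeze_dims` is the deprecated alias of the `axis` argument in tf.squeeze.)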
      return array_ops.squeeze(nn_activations, squeeze_dims=[1]) 
Example #27
Source File: swem_utils.py    From BERT with Apache License 2.0
def discriminator_1layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    H = tf.squeeze(H)
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    return H_dis 
Example #28
Source File: leam_utils.py    From BERT with Apache License 2.0
def discriminator_3layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    H_dis = layers.fully_connected(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_2',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_3', reuse=is_reuse)
    return logits 
Example #29
Source File: swem_utils.py    From BERT with Apache License 2.0
def discriminator_2layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_2', reuse=is_reuse)
    return logits 
Example #30
Source File: swem_utils.py    From BERT with Apache License 2.0
def discriminator_3layer(H, opt, dropout, prefix='', num_outputs=1, is_reuse=None):
    # last layer must be linear
    # H = tf.squeeze(H, [1,2])
    # pdb.set_trace()
    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    H_dis = layers.fully_connected(tf.nn.dropout(H, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_1',
                                   reuse=is_reuse)
    H_dis = layers.fully_connected(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=opt.H_dis,
                                   biases_initializer=biasInit, activation_fn=tf.nn.relu, scope=prefix + 'dis_2',
                                   reuse=is_reuse)
    logits = layers.linear(tf.nn.dropout(H_dis, keep_prob=dropout), num_outputs=num_outputs,
                           biases_initializer=biasInit, scope=prefix + 'dis_3', reuse=is_reuse)
    return logits