Python tensorflow.glorot_uniform_initializer() Examples

The following are 30 code examples of tensorflow.glorot_uniform_initializer(), drawn from open-source projects. The source file, originating project, and license are listed above each example. You may also want to check out all available functions and classes of the module tensorflow, or try the search function.
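Before the examples, here is a minimal usage sketch. It assumes TensorFlow 1.x, where this symbol lives (in TensorFlow 2.x the closest equivalent is tf.keras.initializers.GlorotUniform); the variable name and shape are illustrative.

import tensorflow as tf

# Pass the initializer to tf.get_variable: values are drawn from
# U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)).
init = tf.glorot_uniform_initializer(seed=42)  # seed is optional
w = tf.get_variable("w", shape=(128, 64), initializer=init)

# Or call the initializer directly with a shape to get a tensor of
# initial values, as some of the examples below do.
values = init(shape=(128, 64))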
Example #1
Source File: encoders.py    From neural-structured-learning with Apache License 2.0
def __init__(self, input_dim, is_train, train_dropout=1.0,
               emb_dim=None, proj_w=None, scope="attention", average=False):
    super(SigmoidNbrAttentionEmbedding, self).__init__()
    self.input_dim = input_dim
    self.scope = scope
    self.is_train = is_train
    self.dropout = train_dropout
    self.average = average
    if emb_dim:
      self.emb_dim = emb_dim
    else:
      # Keep embedding dimension same as input node embedding
      self.emb_dim = self.input_dim
    with tf.variable_scope(scope):
      if proj_w:
        self.proj_w = proj_w
      else:
        self.proj_w = tf.get_variable(
            "W_attention", shape=(2 * self.input_dim, self.emb_dim),
            initializer=tf.glorot_uniform_initializer()
        )
    if not proj_w:
      utils.add_variable_summaries(self.proj_w, self.scope + "/W_attention") 
Example #2
Source File: layer_utils.py    From EasyRL with Apache License 2.0
def __init__(self, name, layer_conf):
        self._name = layer_conf.pop('name', None) or name
        activation_name = layer_conf.get('activation', None)
        if activation_name:
            layer_conf['activation'] = Layer.activation_dict[activation_name]

        self._kernel_initializer = layer_conf.pop('kernel_initializer', None)
        if isinstance(self._kernel_initializer, str):
            assert self._kernel_initializer in ('random_normal_initializer',
                                                'random_uniform_initializer',
                                                'glorot_normal_initializer',
                                                'glorot_uniform_initializer'), \
                "Invalid value of kernel_initializer, available value is one of " \
                "['random_normal_initializer', 'random_uniform_initializer'," \
                "'glorot_normal_initializer', 'glorot_uniform_initializer']"

            self._kernel_initializer = Layer.initializer_dict[
                self._kernel_initializer]
        elif (isinstance(self._kernel_initializer, int)
              or isinstance(self._kernel_initializer, float)):
            self._kernel_initializer = tf.constant_initializer(
                value=self._kernel_initializer) 
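For context, here is a hypothetical layer_conf dict that this constructor would accept. The key names other than activation and kernel_initializer are assumptions; per the assertion above, kernel_initializer must be one of the four listed strings, or a number to select a constant initializer.

layer_conf = {
    'name': 'fc1',
    'activation': 'relu',  # resolved via Layer.activation_dict
    'kernel_initializer': 'glorot_uniform_initializer',  # or e.g. 0.1 for a constant
}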
Example #3
Source File: ssd_net.py    From SSD.TensorFlow with Apache License 2.0
def multibox_head(feature_layers, num_classes, num_anchors_depth_per_layer, data_format='channels_first'):
    with tf.variable_scope('multibox_head'):
        cls_preds = []
        loc_preds = []
        for ind, feat in enumerate(feature_layers):
            loc_preds.append(tf.layers.conv2d(feat, num_anchors_depth_per_layer[ind] * 4, (3, 3), use_bias=True,
                        name='loc_{}'.format(ind), strides=(1, 1),
                        padding='same', data_format=data_format, activation=None,
                        kernel_initializer=tf.glorot_uniform_initializer(),
                        bias_initializer=tf.zeros_initializer()))
            cls_preds.append(tf.layers.conv2d(feat, num_anchors_depth_per_layer[ind] * num_classes, (3, 3), use_bias=True,
                        name='cls_{}'.format(ind), strides=(1, 1),
                        padding='same', data_format=data_format, activation=None,
                        kernel_initializer=tf.glorot_uniform_initializer(),
                        bias_initializer=tf.zeros_initializer()))

        return loc_preds, cls_preds 
Example #4
Source File: nn_module.py    From BERT with Apache License 2.0
def _dense_block_mode1(x, hidden_units, dropouts, densenet=False, scope_name="dense_block", reuse=False, training=False, seed=0, bn=False):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        scope_name_i = "%s-dense_block_mode1-%s"%(str(scope_name), str(i))
        with tf.variable_scope(scope_name, reuse=reuse):
            z = tf.layers.dense(x, h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                                  reuse=reuse,
                                  name=scope_name_i)
            if bn:
                z = batch_normalization(z, training=training, name=scope_name_i+"-bn")
            z = tf.nn.relu(z)
            z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
            if densenet:
                x = tf.concat([x, z], axis=-1)
            else:
                x = z
    return x 
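A hypothetical call of the block above, assuming x is a rank-2 float tensor: three dense layers of 256, 128, and 64 units, with 10% dropout after the first two; setting densenet=True would instead concatenate each layer's output onto its input.

h = _dense_block_mode1(x, hidden_units=[256, 128, 64],
                       dropouts=[0.1, 0.1, 0.0],
                       scope_name="mlp", training=True, seed=2020)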
Example #5
Source File: nn_module.py    From BERT with Apache License 2.0
def _dense_block_mode2(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        if bn:
            z = batch_normalization(x, training=training, name=name + "-" + str(i))
        else:
            z = x  # without batch norm, feed the raw input forward
        z = tf.nn.relu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i), dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(z)
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #6
Source File: nn_module.py    From BERT with Apache License 2.0
def _resnet_branch_mode1(x, hidden_units, dropouts, training, seed=0):
    h1, h2, h3 = hidden_units
    dr1, dr2, dr3 = dropouts
    name = "resnet_block"
    # branch 2
    x2 = tf.layers.Dense(h1, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 2), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(1))
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr1, seed=seed * 1)(x2, training=training) if dr1 > 0 else x2

    x2 = tf.layers.Dense(h2, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 3), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(2))
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr2, seed=seed * 2)(x2, training=training) if dr2 > 0 else x2

    x2 = tf.layers.Dense(h3, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 4), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(3))

    return x2 
Example #7
Source File: nn_module.py    From tensorflow-DSMM with MIT License
def _dense_block_mode1(x, hidden_units, dropouts, densenet=False, scope_name="dense_block", reuse=False, training=False, seed=0, bn=False):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        scope_name_i = "%s-dense_block_mode1-%s"%(str(scope_name), str(i))
        with tf.variable_scope(scope_name, reuse=reuse):
            z = tf.layers.dense(x, h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                                  reuse=reuse,
                                  name=scope_name_i)
            if bn:
                z = batch_normalization(z, training=training, name=scope_name_i+"-bn")
            z = tf.nn.relu(z)
            z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
            if densenet:
                x = tf.concat([x, z], axis=-1)
            else:
                x = z
    return x 
Example #8
Source File: nn_module.py    From tensorflow-DSMM with MIT License
def _dense_block_mode2(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        if bn:
            z = batch_normalization(x, training=training, name=name + "-" + str(i))
        else:
            z = x  # without batch norm, feed the raw input forward
        z = tf.nn.relu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i), dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(z)
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #9
Source File: nn_module.py    From tensorflow-DSMM with MIT License
def _resnet_branch_mode1(x, hidden_units, dropouts, training, seed=0):
    h1, h2, h3 = hidden_units
    dr1, dr2, dr3 = dropouts
    name = "resnet_block"
    # branch 2
    x2 = tf.layers.Dense(h1, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 2), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(1))
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr1, seed=seed * 1)(x2, training=training) if dr1 > 0 else x2

    x2 = tf.layers.Dense(h2, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 3), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(2))
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr2, seed=seed * 2)(x2, training=training) if dr2 > 0 else x2

    x2 = tf.layers.Dense(h3, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 4), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2, training=training)
    # x2 = batch_normalization(x2, training=training, name=name + "-" + str(3))

    return x2 
Example #10
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _resnet_branch_mode1(x, hidden_units, dropouts, training, seed=0):
    h1, h2, h3 = hidden_units
    dr1, dr2, dr3 = dropouts
    # branch 2
    x2 = tf.layers.Dense(h1, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 2), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr1, seed=seed * 1)(x2, training=training) if dr1 > 0 else x2

    x2 = tf.layers.Dense(h2, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 3), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr2, seed=seed * 2)(x2, training=training) if dr2 > 0 else x2

    x2 = tf.layers.Dense(h3, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 4), dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)

    return x2 
Example #11
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _dense_block_mode2(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        if bn:
            z = batch_normalization(x, training=training, name=name + "-" + str(i))
        else:
            z = x  # without batch norm, feed the raw input forward
        z = tf.nn.relu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i), dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(z)
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #12
Source File: nn_module.py    From tensorflow-XNN with MIT License
def _dense_block_mode1(x, hidden_units, dropouts, densenet=False, training=False, seed=0, bn=False, name="dense_block"):
    """
    :param x:
    :param hidden_units:
    :param dropouts:
    :param densenet: enable densenet
    :return:
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        z = tf.layers.Dense(h, kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                            dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(x)
        if bn:
            z = batch_normalization(z, training=training, name=name+"-"+str(i))
        z = tf.nn.relu(z)
        # z = tf.nn.selu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x 
Example #13
Source File: graphsage.py    From gnn-benchmark with MIT License
def aggregate_maxpool(features, agg_transform_size, adj_with_self_loops_indices, num_features, name):
    with tf.name_scope(name):
        fc_weights = tf.get_variable(f"{name}-fc_weights",
                                     shape=[num_features, agg_transform_size],
                                     dtype=tf.float32,
                                     initializer=tf.glorot_uniform_initializer(),
                                     )
        # dims: num_nodes x num_features, num_features x agg_transform_size -> num_nodes x agg_transform_size
        if isinstance(features, tf.SparseTensor):
            transformed_features = tf.sparse_tensor_dense_matmul(features, fc_weights)
        else:
            transformed_features = tf.matmul(features, fc_weights)
        transformed_features = tf.nn.relu(transformed_features)

        # Spread out the transformed features to neighbours.
        # dims: num_nodes x agg_transform_size, num_nodes x max_degree -> num_nodes x agg_transform_size x max_degree
        neighbours_features = tf.gather(transformed_features, adj_with_self_loops_indices)

        # employ the max aggregator
        output = tf.reduce_max(neighbours_features, axis=1)
        return output


# dims:
#   features: num_nodes x num_features 
Example #14
Source File: mlp.py    From gnn-benchmark with MIT License
def fully_connected_layer(inputs, output_dim, activation_fn, dropout_prob, weight_decay, name):
    with tf.name_scope(name):
        input_dim = int(inputs.get_shape()[1])
        weights = tf.get_variable("%s-weights" % name, [input_dim, output_dim], dtype=tf.float32,
                                  initializer=tf.glorot_uniform_initializer(),
                                  regularizer=slim.l2_regularizer(weight_decay))

        # Apply dropout to inputs if required
        inputs = tf.cond(
            tf.cast(dropout_prob, tf.bool),
            true_fn=(lambda: dropout_supporting_sparse_tensors(inputs, 1 - dropout_prob)),
            false_fn=(lambda: inputs),
        )

        if isinstance(inputs, tf.SparseTensor):
            output = tf.sparse_tensor_dense_matmul(inputs, weights)
        else:
            output = tf.matmul(inputs, weights)
        output = tf.contrib.layers.bias_add(output)
        return activation_fn(output) if activation_fn else output 
Example #15
Source File: model.py    From multimodal-sentiment-analysis with MIT License
def BiGRU(self, inputs, output_size, name, dropout_keep_rate):
        with tf.variable_scope('rnn_' + name, reuse=tf.AUTO_REUSE):
            kernel_init = tf.glorot_uniform_initializer(seed=self.seed, dtype=tf.float32)
            bias_init = tf.zeros_initializer()

            fw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout_keep_rate)

            # bw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
            #                                 kernel_initializer=kernel_init, bias_initializer=bias_init)
            # bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=dropout_keep_rate)

            # Note: the dedicated bw_cell above is commented out, so the forward
            # cell is reused for the backward direction (shared weights).
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell, cell_bw=fw_cell, inputs=inputs,
                                                         sequence_length=self.seq_len, dtype=tf.float32)

            output_fw, output_bw = outputs
            output = tf.concat([output_fw, output_bw], axis=-1)
            return output 
Example #16
Source File: model.py    From RecommenderSystems with MIT License
def build(self):
        self.embedding = embedding = tf.get_variable('item_embedding', [self.n_items, self.emb_item],\
                                        initializer=tf.glorot_uniform_initializer())
        features_0 = self.decode() # features of zero layer nodes. 
        #outputs with shape [max_time, batch_size, dim2]
        if self.global_only:
            features_1_2 = self.global_features()
        elif self.local_only:
            features_1_2 = self.local_features()
        else:
            features_1_2 = self.global_and_local_features()
        outputs = self.step_by_step(features_0, features_1_2, self.dims, self.num_samples, self.support_sizes,
                                concat=self.concat)
        concat_self = tf.concat([features_0, outputs], axis=-1)

        # exchange first two dimensions.
        self.transposed_outputs = tf.transpose(concat_self, [1,0,2])

        self.loss = self._loss()
        self.sum_recall = self._recall()
        self.sum_ndcg = self._ndcg()
        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
                        for grad, var in grads_and_vars]
        self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step) 
Example #17
Source File: model.py    From multimodal-sentiment-analysis with MIT License
def GRU2(self, inputs, output_size, name, dropout_keep_rate):
        with tf.variable_scope('rnn_' + name, reuse=tf.AUTO_REUSE):
            kernel_init = tf.glorot_uniform_initializer(seed=self.seed, dtype=tf.float32)
            bias_init = tf.zeros_initializer()

            fw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout_keep_rate)

            bw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=dropout_keep_rate)

            output_fw, _ = tf.nn.dynamic_rnn(fw_cell, inputs, sequence_length=self.seq_len, dtype=tf.float32)
            output_bw, _ = tf.nn.dynamic_rnn(bw_cell, inputs, sequence_length=self.seq_len, dtype=tf.float32)

            output = tf.concat([output_fw, output_bw], axis=-1)
            return output 
Example #18
Source File: model.py    From AmusingPythonCodes with MIT License
def BiGRU(self, inputs, output_size, name, dropout_keep_rate):
        with tf.variable_scope('rnn_' + name, reuse=tf.AUTO_REUSE):
            kernel_init = tf.glorot_uniform_initializer(seed=self.seed, dtype=tf.float32)
            bias_init = tf.zeros_initializer()

            fw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout_keep_rate)

            bw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=dropout_keep_rate)

            # Bug fixed here: bw_cell was created above but fw_cell was passed
            # for both directions; use the backward cell as intended.
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell, cell_bw=bw_cell, inputs=inputs,
                                                         sequence_length=self.seq_len, dtype=tf.float32)

            output_fw, output_bw = outputs
            output = tf.concat([output_fw, output_bw], axis=-1)
            return output 
Example #19
Source File: model.py    From AmusingPythonCodes with MIT License
def GRU2(self, inputs, output_size, name, dropout_keep_rate):
        with tf.variable_scope('rnn_' + name, reuse=tf.AUTO_REUSE):
            kernel_init = tf.glorot_uniform_initializer(seed=self.seed, dtype=tf.float32)
            bias_init = tf.zeros_initializer()

            fw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout_keep_rate)

            bw_cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                             kernel_initializer=kernel_init, bias_initializer=bias_init)
            bw_cell = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=dropout_keep_rate)

            output_fw, _ = tf.nn.dynamic_rnn(fw_cell, inputs, sequence_length=self.seq_len, dtype=tf.float32)
            output_bw, _ = tf.nn.dynamic_rnn(bw_cell, inputs, sequence_length=self.seq_len, dtype=tf.float32)

            output = tf.concat([output_fw, output_bw], axis=-1)
            return output 
Example #20
Source File: tf_utils.py    From GMAN with Apache License 2.0
def conv2d(x, output_dims, kernel_size, stride = [1, 1],
           padding = 'SAME', use_bias = True, activation = tf.nn.relu,
           bn = False, bn_decay = None, is_training = None):
    input_dims = x.get_shape()[-1].value
    kernel_shape = kernel_size + [input_dims, output_dims]
    kernel = tf.Variable(
        tf.glorot_uniform_initializer()(shape = kernel_shape),
        dtype = tf.float32, trainable = True, name = 'kernel')
    x = tf.nn.conv2d(x, kernel, [1] + stride + [1], padding = padding)
    if use_bias:
        bias = tf.Variable(
            tf.zeros_initializer()(shape = [output_dims]),
            dtype = tf.float32, trainable = True, name = 'bias')
        x = tf.nn.bias_add(x, bias)
    if activation is not None:
        if bn:
            x = batch_norm(x, is_training = is_training, bn_decay = bn_decay)
        x = activation(x)
    return x 
Example #21
Source File: tf_utils.py    From GMAN with Apache License 2.0
def conv2d(x, output_dims, kernel_size, stride = [1, 1],
           padding = 'SAME', use_bias = True, activation = tf.nn.relu,
           bn = False, bn_decay = None, is_training = None):
    input_dims = x.get_shape()[-1].value
    kernel_shape = kernel_size + [input_dims, output_dims]
    kernel = tf.Variable(
        tf.glorot_uniform_initializer()(shape = kernel_shape),
        dtype = tf.float32, trainable = True, name = 'kernel')
    x = tf.nn.conv2d(x, kernel, [1] + stride + [1], padding = padding)
    if use_bias:
        bias = tf.Variable(
            tf.zeros_initializer()(shape = [output_dims]),
            dtype = tf.float32, trainable = True, name = 'bias')
        x = tf.nn.bias_add(x, bias)
    if activation is not None:
        if bn:
            x = batch_norm(x, is_training = is_training, bn_decay = bn_decay)
        x = activation(x)
    return x 
Example #22
Source File: detxt_cpn.py    From tf.fashionAI with Apache License 2.0
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and is based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)

    return tf.layers.conv2d(
                inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
                kernel_initializer=kernel_initializer(),
                data_format=data_format, name=name)

# input image order: BGR, range [0-255]
# mean_value: 104, 117, 123
# only subtract mean is used 
Example #23
Source File: simple_xt.py    From tf.fashionAI with Apache License 2.0
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and is based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)

    return tf.layers.conv2d(
                inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
                kernel_initializer=kernel_initializer(),
                data_format=data_format, name=name)

# input image order: BGR, range [0-255]
# mean_value: 104, 117, 123
# only subtract mean is used 
Example #24
Source File: seresnet_cpn.py    From tf.fashionAI with Apache License 2.0
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and is based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)

    return tf.layers.conv2d(
                inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
                kernel_initializer=kernel_initializer(),
                data_format=data_format, name=name)


################################################################################
# ResNet block definitions.
################################################################################ 
Example #25
Source File: cpn.py    From tf.fashionAI with Apache License 2.0
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
    """Strided 2-D convolution with explicit padding."""
    # The padding is consistent and is based only on `kernel_size`, not on the
    # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
    if strides > 1:
        inputs = fixed_padding(inputs, kernel_size, data_format)

    return tf.layers.conv2d(
                inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
                padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
                kernel_initializer=kernel_initializer(),
                data_format=data_format, name=name)


################################################################################
# ResNet block definitions.
################################################################################ 
Example #26
Source File: cpn.py    From tf.fashionAI with Apache License 2.0
def cpn_backbone(inputs, istraining, data_format):
    block_strides = [1, 2, 2, 2]
    inputs = conv2d_fixed_padding(inputs=inputs, filters=64, kernel_size=7, strides=2, data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer)
    inputs = tf.identity(inputs, 'initial_conv')

    inputs = tf.layers.max_pooling2d(inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=data_format)
    inputs = tf.identity(inputs, 'initial_max_pool')

    end_points = []
    for i, num_blocks in enumerate([3, 4, 6, 3]):
      num_filters = 64 * (2**i)
      #with tf.variable_scope('block_{}'.format(i), 'resnet50', values=[inputs]):
      inputs = block_layer(
          inputs=inputs, filters=num_filters, bottleneck=True,
          block_fn=_bottleneck_block_v1, blocks=num_blocks,
          strides=block_strides[i], training=istraining,
          name='block_layer{}'.format(i + 1), data_format=data_format)
      end_points.append(inputs)

    return end_points 
Example #27
Source File: encoders.py    From neural-structured-learning with Apache License 2.0
def __init__(self, emb_dim, is_train, train_dropout=1.0,
               input_dim=None, embeddings=None, scope="embeddings",
               use_tanh=False, num_ps_tasks=None):
    super(EmbeddingLookup, self).__init__()
    self.emb_dim = emb_dim
    self.is_train = is_train
    self.dropout = train_dropout
    self.use_tanh = use_tanh
    with tf.variable_scope(scope):
      if embeddings:
        self.embeddings = embeddings
      else:
        partitioner = None
        if num_ps_tasks:
          partitioner = tf.min_max_variable_partitioner(
              max_partitions=num_ps_tasks
          )
        self.embeddings = tf.get_variable(
            "embeddings", shape=(input_dim, self.emb_dim),
            initializer=tf.glorot_uniform_initializer(),
            partitioner=partitioner
        )
    if not embeddings:
      utils.add_variable_summaries(self.embeddings, scope) 
Example #28
Source File: encoders.py    From neural-structured-learning with Apache License 2.0
def __init__(self, input_dim, is_train, train_dropout=1.0,
               emb_dim=None, proj_w=None, scope="attention"):
    super(NbrAttentionEmbedding, self).__init__()
    self.input_dim = input_dim
    self.scope = scope
    self.is_train = is_train
    self.dropout = train_dropout
    if emb_dim:
      self.emb_dim = emb_dim
    else:
      # Keep embedding dimension same as input node embedding
      self.emb_dim = self.input_dim
    with tf.variable_scope(scope):
      if proj_w:
        self.proj_w = proj_w
      else:
        self.proj_w = tf.get_variable(
            "W_attention", shape=(2 * self.input_dim, self.emb_dim),
            initializer=tf.glorot_uniform_initializer()
        )
    if not proj_w:
      utils.add_variable_summaries(self.proj_w, self.scope + "/W_attention") 
Example #29
Source File: initializer.py    From zero with BSD 3-Clause "New" or "Revised" License
def get_initializer(initializer, initializer_gain):
    tfdtype = tf.as_dtype(dtype.floatx())

    if initializer == "uniform":
        max_val = initializer_gain
        return tf.random_uniform_initializer(-max_val, max_val, dtype=tfdtype)
    elif initializer == "normal":
        return tf.random_normal_initializer(0.0, initializer_gain, dtype=tfdtype)
    elif initializer == "normal_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="normal",
                                               dtype=tfdtype)
    elif initializer == "uniform_unit_scaling":
        return tf.variance_scaling_initializer(initializer_gain,
                                               mode="fan_avg",
                                               distribution="uniform",
                                               dtype=tfdtype)
    else:
        tf.logging.warn("Unrecognized initializer: %s" % initializer)
        tf.logging.warn("Return to default initializer: glorot_uniform_initializer")
        return tf.glorot_uniform_initializer(dtype=tfdtype) 
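A hypothetical usage of this helper: a recognized name returns the matching initializer, while any unrecognized name logs a warning and falls back to tf.glorot_uniform_initializer.

init = get_initializer("uniform", initializer_gain=0.08)  # U(-0.08, 0.08)
w = tf.get_variable("proj", shape=(512, 512), initializer=init)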
Example #30
Source File: model.py    From AmusingPythonCodes with MIT License
def GRU(self, inputs, output_size, name, dropout_keep_rate):
        with tf.variable_scope('rnn_' + name, reuse=tf.AUTO_REUSE):
            kernel_init = tf.glorot_uniform_initializer(seed=self.seed, dtype=tf.float32)
            bias_init = tf.zeros_initializer()

            cell = tf.contrib.rnn.GRUCell(output_size, name='gru', reuse=tf.AUTO_REUSE, activation=tf.nn.tanh,
                                          kernel_initializer=kernel_init, bias_initializer=bias_init)
            cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout_keep_rate)

            output, _ = tf.nn.dynamic_rnn(cell, inputs, sequence_length=self.seq_len, dtype=tf.float32)

            return output