Python tensorflow.python.ops.init_ops.glorot_uniform_initializer() Examples
The following are 9 code examples of tensorflow.python.ops.init_ops.glorot_uniform_initializer(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.python.ops.init_ops.
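Before the project examples, here is a minimal self-contained sketch of how the initializer is typically handed to a variable (graph-mode tf.compat.v1 API; the scope name, variable name, shape, and seed below are illustrative, not taken from any of the projects):

import tensorflow as tf
from tensorflow.python.ops import init_ops

tf.compat.v1.disable_eager_execution()  # graph mode, needed when running under TF 2.x

# Glorot (Xavier) uniform: samples from U(-limit, limit) with
# limit = sqrt(6 / (fan_in + fan_out)), computed from the variable's shape.
initializer = init_ops.glorot_uniform_initializer(seed=42)

with tf.compat.v1.variable_scope('example'):
    weights = tf.compat.v1.get_variable('weights',
                                        shape=[784, 256],
                                        dtype=tf.float32,
                                        initializer=initializer)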
Example #1
Source File: variable_scope.py From lambda-packs with MIT License
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
  """Provide a default initializer and a corresponding value.

  Args:
    name: see get_variable.
    shape: see get_variable.
    dtype: see get_variable.

  Returns:
    initializer and initializing_from_value. See get_variable above.

  Raises:
    ValueError: When giving unsupported dtype.
  """
  # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
  if dtype.is_floating:
    initializer = init_ops.glorot_uniform_initializer()
    initializing_from_value = False
  # If dtype is DT_INT/DT_UINT, provide a default value `zero`
  # If dtype is DT_BOOL, provide a default value `FALSE`
  elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
    initializer = init_ops.zeros_initializer()(
        shape=shape, dtype=dtype.base_dtype)
    initializing_from_value = True
  # NOTES: Do we need to support for handling DT_STRING and DT_COMPLEX here?
  else:
    raise ValueError("An initializer for variable %s of %s is required" %
                     (name, dtype.base_dtype))

  return initializer, initializing_from_value


# To stop regularization, use this regularizer
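This method is part of TensorFlow's variable_scope machinery: it supplies the fallback initializer when get_variable is called without an explicit one, which is why floating-point variables default to Glorot uniform and integer/boolean variables default to zeros. A rough illustration of that behaviour (the scope and variable names are made up for this sketch):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # graph mode, needed when running under TF 2.x

with tf.compat.v1.variable_scope('no_explicit_initializer'):
    # No initializer argument: for a float32 variable, _get_default_initializer
    # falls back to init_ops.glorot_uniform_initializer().
    w = tf.compat.v1.get_variable('w', shape=[10, 10], dtype=tf.float32)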
Example #2
Source File: variable_scope.py From auto-alt-text-lambda-api with MIT License
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
  """Provide a default initializer and a corresponding value.

  Args:
    name: see get_variable.
    shape: see get_variable.
    dtype: see get_variable.

  Returns:
    initializer and initializing_from_value. See get_variable above.

  Raises:
    ValueError: When giving unsupported dtype.
  """
  # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
  if dtype.is_floating:
    initializer = init_ops.glorot_uniform_initializer()
    initializing_from_value = False
  # If dtype is DT_INT/DT_UINT, provide a default value `zero`
  # If dtype is DT_BOOL, provide a default value `FALSE`
  elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
    initializer = init_ops.zeros_initializer()(
        shape=shape, dtype=dtype.base_dtype)
    initializing_from_value = True
  # NOTES: Do we need to support for handling DT_STRING and DT_COMPLEX here?
  else:
    raise ValueError("An initializer for variable %s of %s is required" %
                     (name, dtype.base_dtype))

  return initializer, initializing_from_value


# To stop regularization, use this regularizer
Example #3
Source File: transformer_layers.py From nematus with BSD 3-Clause "New" or "Revised" License
def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
    # Set arguments
    self.vocabulary_size = vocabulary_size
    self.hidden_size = hidden_size
    self.float_dtype = float_dtype
    self.name = name

    # Create embedding matrix and its transposes
    with tf.compat.v1.variable_scope(self.name):
        self.embedding_table = tf.compat.v1.get_variable(name='embedding_table',
                                                         shape=[vocabulary_size, embedding_size],
                                                         dtype=float_dtype,
                                                         initializer=glorot_uniform_initializer(),
                                                         trainable=True)
        self.projection_matrix = tf.transpose(a=self.embedding_table, name='vocab_projection_matrix')
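In this layer the Glorot-initialized embedding table doubles, via its transpose, as the output projection (tied input and output embeddings). A short usage sketch, assuming a table built as above with vocabulary_size=1000 and embedding_size=512 (the variable names and token ids are stand-ins, not nematus code):

import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer

tf.compat.v1.disable_eager_execution()  # graph mode, needed when running under TF 2.x

embedding_table = tf.compat.v1.get_variable('embedding_table', shape=[1000, 512],
                                            initializer=glorot_uniform_initializer())
projection_matrix = tf.transpose(a=embedding_table)

ids = tf.constant([3, 17, 42])                              # token ids, shape [3]
embedded = tf.nn.embedding_lookup(embedding_table, ids)     # shape [3, 512]
logits = tf.matmul(embedded, projection_matrix)             # shape [3, 1000], tied output projection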
Example #4
Source File: rnn.py From seq2seq with Apache License 2.0
def __init__(self, cell_size):
    self.cell_size = cell_size
    self.default_initializer = tf.get_variable_scope().initializer or init_ops.glorot_uniform_initializer()
    self.initializer = tf.orthogonal_initializer()
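This constructor takes the enclosing variable scope's initializer if one was set and falls back to Glorot uniform otherwise. A minimal sketch of that fallback pattern (scope and variable names are illustrative, not from the seq2seq project):

import tensorflow as tf
from tensorflow.python.ops import init_ops

tf.compat.v1.disable_eager_execution()  # graph mode, needed when running under TF 2.x

with tf.compat.v1.variable_scope('decoder'):  # no initializer attached to this scope
    # The scope defines no initializer, so Glorot uniform is used.
    default_initializer = (tf.compat.v1.get_variable_scope().initializer
                           or init_ops.glorot_uniform_initializer())
    weights = tf.compat.v1.get_variable('weights', shape=[128, 256],
                                        initializer=default_initializer)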
Example #5
Source File: nvcnn.py From dlcookbook-dlbs with Apache License 2.0
def _get_variable(self, name, shape, dtype=None,
                  initializer=None, seed=None):
    if dtype is None:
        dtype = self.dtype
    if initializer is None:
        initializer = init_ops.glorot_uniform_initializer(seed=seed)
    elif (isinstance(initializer, float) or
          isinstance(initializer, int)):
        initializer = tf.constant_initializer(float(initializer))
    return tf.get_variable(name, shape, dtype, initializer)
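The helper above treats its initializer argument flexibly: None falls back to a seeded Glorot uniform initializer, while a bare number is wrapped in a constant initializer. Hypothetical call sites might look like this (assuming `model` is an instance of the class defining _get_variable; the variable names and shapes are invented for illustration):

# Kernel: no initializer passed, so Glorot uniform with the given seed is used.
kernel = model._get_variable('conv1/kernel', shape=[3, 3, 64, 128], seed=1234)

# Bias: the scalar 0.1 is wrapped in tf.constant_initializer(0.1).
bias = model._get_variable('conv1/bias', shape=[128], initializer=0.1)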
Example #6
Source File: variable_scope.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
  """Provide a default initializer and a corresponding value.

  Args:
    name: see get_variable.
    shape: see get_variable.
    dtype: see get_variable.

  Returns:
    initializer and initializing_from_value. See get_variable above.

  Raises:
    ValueError: When giving unsupported dtype.
  """
  # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
  if dtype.is_floating:
    initializer = init_ops.glorot_uniform_initializer()
    initializing_from_value = False
  # If dtype is DT_INT/DT_UINT, provide a default value `zero`
  # If dtype is DT_BOOL, provide a default value `FALSE`
  elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
    initializer = init_ops.zeros_initializer()(
        shape=shape, dtype=dtype.base_dtype)
    initializing_from_value = True
  # NOTES: Do we need to support for handling DT_STRING and DT_COMPLEX here?
  else:
    raise ValueError("An initializer for variable %s of %s is required" %
                     (name, dtype.base_dtype))

  return initializer, initializing_from_value


# To stop regularization, use this regularizer
Example #7
Source File: variable_scope.py From keras-lambda with MIT License
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
  """Provide a default initializer and a corresponding value.

  Args:
    name: see get_variable.
    shape: see get_variable.
    dtype: see get_variable.

  Returns:
    initializer and initializing_from_value. See get_variable above.

  Raises:
    ValueError: When giving unsupported dtype.
  """
  # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
  if dtype.is_floating:
    initializer = init_ops.glorot_uniform_initializer()
    initializing_from_value = False
  # If dtype is DT_INT/DT_UINT, provide a default value `zero`
  # If dtype is DT_BOOL, provide a default value `FALSE`
  elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
    initializer = init_ops.zeros_initializer()(
        shape=shape, dtype=dtype.base_dtype)
    initializing_from_value = True
  # NOTES: Do we need to support for handling DT_STRING and DT_COMPLEX here?
  else:
    raise ValueError("An initializer for variable %s of %s is required" %
                     (name, dtype.base_dtype))

  return initializer, initializing_from_value


# To stop regularization, use this regularizer
Example #8
Source File: transformer_attention_modules.py From nematus with BSD 3-Clause "New" or "Revised" License
def __init__(self,
             reference_dims,
             hypothesis_dims,
             hidden_dims,
             float_dtype,
             dropout_attn,
             training,
             name,
             attn_type='multiplicative'):
    # Declare attributes
    self.reference_dims = reference_dims
    self.hypothesis_dims = hypothesis_dims
    self.hidden_dims = hidden_dims
    self.float_dtype = float_dtype
    self.attn_type = attn_type
    self.training = training
    self.name = name

    assert attn_type in ['additive', 'multiplicative'], \
        'Attention type {:s} is not supported.'.format(attn_type)

    if dropout_attn > 0:
        self.dropout_attn = tf.keras.layers.Dropout(rate=dropout_attn)
    else:
        self.dropout_attn = None

    # Instantiate parameters
    with tf.compat.v1.variable_scope(self.name):
        self.queries_projection = None
        self.attn_weight = None
        if attn_type == 'additive':
            self.queries_projection = FeedForwardLayer(self.hypothesis_dims,
                                                       self.hidden_dims,
                                                       float_dtype,
                                                       dropout_rate=0.,
                                                       activation=None,
                                                       use_bias=False,
                                                       use_layer_norm=False,
                                                       training=self.training,
                                                       name='queries_projection')
            self.attn_weight = tf.compat.v1.get_variable(name='attention_weight',
                                                         shape=self.hidden_dims,
                                                         dtype=float_dtype,
                                                         initializer=glorot_uniform_initializer(),
                                                         trainable=True)
        self.keys_projection = FeedForwardLayer(self.reference_dims,
                                                self.hidden_dims,
                                                float_dtype,
                                                dropout_rate=0.,
                                                activation=None,
                                                use_bias=False,
                                                use_layer_norm=False,
                                                training=self.training,
                                                name='keys_projection')
Example #9
Source File: transformer_layers.py From nematus with BSD 3-Clause "New" or "Revised" License
def __init__(self,
             in_size,
             out_size,
             float_dtype,
             dropout_rate,
             activation,
             use_bias,
             use_layer_norm,
             training,
             name):
    # Set attributes
    self.in_size = in_size
    self.out_size = out_size
    self.dropout_rate = dropout_rate
    self.activation = activation
    self.use_bias = use_bias
    self.training = training
    self.name = name

    with tf.compat.v1.variable_scope(self.name):
        # Set up layer normalization
        if use_layer_norm:
            self.layer_norm_layer = LayerNormLayer(out_size)
        else:
            self.layer_norm_layer = None

        if dropout_rate > 0:
            self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
        else:
            self.dropout = None

        # Define parameters
        weights_shape = [in_size, out_size] if out_size is not None else [in_size]
        self.weights = tf.compat.v1.get_variable(name='dense_layer_weights',
                                                 shape=weights_shape,
                                                 dtype=float_dtype,
                                                 initializer=glorot_uniform_initializer(),
                                                 trainable=True)
        if use_bias:
            biases_shape = [out_size] if out_size is not None else [in_size]
            self.biases = tf.compat.v1.get_variable(name='dense_layer_biases',
                                                    shape=biases_shape,
                                                    dtype=float_dtype,
                                                    initializer=tf.zeros_initializer(),
                                                    trainable=True)