Python tensorflow.python.ops.init_ops.random_normal_initializer() Examples
The following are 9 code examples of tensorflow.python.ops.init_ops.random_normal_initializer().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.python.ops.init_ops.
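
Before the examples, here is a minimal standalone sketch of what random_normal_initializer does. It assumes TensorFlow 1.x graph-mode behavior (via tf.compat.v1 on newer installs); the variable name and shape are illustrative:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

tf.disable_eager_execution()  # the examples on this page are graph-mode code

# random_normal_initializer(mean, stddev) returns a callable; get_variable
# calls it with the variable's shape to produce the initial value.
init = init_ops.random_normal_initializer(mean=0.0, stddev=0.02, seed=42)
w = tf.get_variable("w", shape=[128, 64], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # should be close to 0.02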
Example #1
Source File: mru.py From SketchySceneColorization with MIT License
def embed_labels(inputs, num_classes, output_dim, sn,
                 weight_decay_rate=1e-5, reuse=None, scope=None):
    # TODO move regularizer definitions to model
    weights_regularizer = ly.l2_regularizer(weight_decay_rate)

    with tf.variable_scope(scope, 'embedding', [inputs], reuse=reuse) as sc:
        inputs = tf.convert_to_tensor(inputs)
        weights = tf.get_variable(
            name="weights",
            shape=(num_classes, output_dim),
            initializer=init_ops.random_normal_initializer)

        # Spectral Normalization
        if sn:
            weights = spectral_normed_weight(
                weights, num_iters=1,
                update_collection=Config.SPECTRAL_NORM_UPDATE_OPS)

        embed_out = tf.nn.embedding_lookup(weights, inputs)

    return embed_out
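Note that embed_labels passes the initializer class itself rather than an instance; in TF1, tf.get_variable accepts either, and when given the class it instantiates it with default arguments (mean 0.0, stddev 1.0). A short sketch of the difference, with illustrative variable names:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

# Passing the class: get_variable instantiates it with its defaults,
# so the weights are drawn from N(0, 1).
w1 = tf.get_variable("w1", shape=(10, 4),
                     initializer=init_ops.random_normal_initializer)

# Passing an instance: you choose the distribution's parameters.
w2 = tf.get_variable("w2", shape=(10, 4),
                     initializer=init_ops.random_normal_initializer(0.0, 0.02))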
Example #2
Source File: backend.py From lambda-packs with MIT License
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_normal_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
      >>> K.eval(kvar)
      array([[ 1.19591331,  0.68685907, -0.63814116],
             [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  shape = tuple(map(int, shape))
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
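The seed handling above is worth noting: drawing the TensorFlow seed from NumPy's RNG means that seeding NumPy alone makes the sampled values reproducible. A small sketch of the same trick applied directly to the initializer (assumes TF1 graph mode; the shape is illustrative):

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

tf.disable_eager_execution()

np.random.seed(1234)             # fixing NumPy's RNG...
seed = np.random.randint(10**9)  # ...fixes the derived TensorFlow seed

init = init_ops.random_normal_initializer(0.0, 1.0, seed=seed)
value = init((2, 3))             # a Tensor of N(0, 1) samples

with tf.Session() as sess:
    print(sess.run(value))       # identical across runs of the script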
Example #3
Source File: backend.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_normal_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
      >>> K.eval(kvar)
      array([[ 1.19591331,  0.68685907, -0.63814116],
             [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
Example #4
Source File: models.py From lambda-packs with MIT License
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
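The docstring's remark about zero initialization can be made concrete: with init_mean=0.0 and init_stddev=0.0, every sample equals the mean, so random_normal_initializer degenerates to a zeros initializer. A minimal sketch of that degenerate case (assumes TF1 graph mode; the variable name is illustrative):

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import init_ops

tf.disable_eager_execution()

# With stddev=0.0 every sample equals the mean, so (0.0, 0.0) behaves
# like a zeros initializer, the convex-case setup the docstring describes.
zero_init = init_ops.random_normal_initializer(0.0, 0.0)
w = tf.get_variable("w_zero", shape=[3, 1], initializer=zero_init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # all zeros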
Example #5
Source File: models.py From auto-alt-text-lambda-api with MIT License
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
Example #6
Source File: ind_rnn_cell.py From indrnn with Apache License 2.0
def build(self, inputs_shape):
  if inputs_shape[1].value is None:
    raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                     % inputs_shape)

  input_depth = inputs_shape[1].value
  if self._input_initializer is None:
    self._input_initializer = init_ops.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)
  self._input_kernel = self.add_variable(
      "input_kernel",
      shape=[input_depth, self._num_units],
      initializer=self._input_initializer)

  if self._recurrent_initializer is None:
    self._recurrent_initializer = init_ops.constant_initializer(1.)
  self._recurrent_kernel = self.add_variable(
      "recurrent_kernel",
      shape=[self._num_units],
      initializer=self._recurrent_initializer)

  # Clip the absolute values of the recurrent weights to the specified minimum
  if self._recurrent_min_abs:
    abs_kernel = math_ops.abs(self._recurrent_kernel)
    min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
    self._recurrent_kernel = math_ops.multiply(
        math_ops.sign(self._recurrent_kernel), min_abs_kernel)

  # Clip the absolute values of the recurrent weights to the specified maximum
  if self._recurrent_max_abs:
    self._recurrent_kernel = clip_ops.clip_by_value(self._recurrent_kernel,
                                                    -self._recurrent_max_abs,
                                                    self._recurrent_max_abs)

  self._bias = self.add_variable(
      "bias",
      shape=[self._num_units],
      initializer=init_ops.zeros_initializer(dtype=self.dtype))

  self.built = True
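The clipping logic above is the IndRNN recipe for keeping each unit's recurrent gain in a stable range: sign(u) * max(|u|, min_abs) raises magnitudes to the floor while preserving sign, and clip_by_value then caps them. The same arithmetic in plain NumPy (the threshold values are illustrative):

import numpy as np

def clip_recurrent(u, min_abs=0.1, max_abs=2.0):
    # Floor the magnitude while keeping the sign; note sign(0) == 0,
    # so exact zeros stay zero.
    u = np.sign(u) * np.maximum(np.abs(u), min_abs)
    # Cap the magnitude symmetrically, as clip_by_value does.
    return np.clip(u, -max_abs, max_abs)

u = np.array([-3.0, -0.05, 0.0, 0.02, 1.5])
print(clip_recurrent(u))  # [-2.  -0.1  0.   0.1  1.5]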
Example #7
Source File: models.py From deep_image_model with Apache License 2.0
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
Example #8
Source File: indRNN.py From Text-Classification with Apache License 2.0
def build(self, inputs_shape):
    '''Construct the IndRNN cell.'''
    if inputs_shape[1].value is None:
        raise ValueError("Expected inputs.shape[1] to be known")

    input_depth = inputs_shape[1]
    if self._input_kernel_initializer is None:
        self._input_kernel_initializer = init_ops.random_normal_initializer(
            mean=0, stddev=1e-3)
    # matrix W
    self._input_kernel = self.add_variable(
        "input_kernel",
        shape=[input_depth, self._num_units],
        initializer=self._input_kernel_initializer
    )

    if self._recurrent_recurrent_kernel_initializer is None:
        self._recurrent_recurrent_kernel_initializer = \
            init_ops.constant_initializer(1.)
    # matrix U
    self._recurrent_kernel = self.add_variable(
        "recurrent_kernel",
        shape=[self._num_units],
        initializer=self._recurrent_recurrent_kernel_initializer
    )

    # Clip U to the [min_abs, max_abs] magnitude range
    if self._recurrent_min_abs:
        abs_kernel = math_ops.abs(self._recurrent_kernel)
        min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
        self._recurrent_kernel = math_ops.multiply(
            math_ops.sign(self._recurrent_kernel), min_abs_kernel
        )
    if self._recurrent_max_abs:
        self._recurrent_kernel = clip_ops.clip_by_value(
            self._recurrent_kernel,
            -self._recurrent_max_abs,
            self._recurrent_max_abs
        )

    self._bias = self.add_variable(
        "bias",
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype)
    )

    # build finished
    self.built = True
Example #9
Source File: models.py From keras-lambda with MIT License
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)