Python tensorflow.python.ops.init_ops.Initializer() Examples
The following are 7 code examples of tensorflow.python.ops.init_ops.Initializer(), drawn from open-source projects. Each example notes its source file, the project it comes from, and that project's license.
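Before the examples: Initializer is the abstract base class that the stock TensorFlow 1.x initializers (tf.constant_initializer, tf.truncated_normal_initializer, and so on) derive from. As a minimal orientation sketch, assuming TensorFlow 1.x, a custom subclass only needs to implement __call__; the ScaledOnesInitializer name below is hypothetical:

import tensorflow as tf
from tensorflow.python.ops.init_ops import Initializer


class ScaledOnesInitializer(Initializer):
    """Hypothetical initializer that fills a variable with a constant scale."""

    def __init__(self, scale=0.1, dtype=tf.float32):
        self.scale = scale
        self.dtype = dtype

    def __call__(self, shape, dtype=None, partition_info=None):
        # Produce the initial value for a variable of the requested shape.
        return tf.ones(shape, dtype=dtype or self.dtype) * self.scale

    def get_config(self):
        return {"scale": self.scale, "dtype": self.dtype.name}


v = tf.get_variable("v", shape=[3, 3], initializer=ScaledOnesInitializer(0.5))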
Example #1
Source File: base.py From mayo with MIT License
def _getter_kwargs(self, instance):
    # Collect keyword arguments for the variable getter, falling back to
    # the instance's per-parameter configuration for missing values.
    defaults = instance._parameter_config.get(self.name, {})
    kwargs = {}
    for key in ['initial', 'shape']:
        value = getattr(self, key, None)
        if value is None:
            try:
                value = defaults[key]
            except KeyError:
                raise KeyError(
                    'Parameter {} does not specify a configuration for {}.'
                    .format(self.name, key))
        kwargs[key] = value
    kwargs['name'] = self.name
    init = kwargs.pop('initial')
    # Wrap raw initial values in a constant initializer; genuine
    # Initializer instances pass through unchanged.
    from tensorflow.python.ops.init_ops import Initializer
    if init is not None and not isinstance(init, Initializer):
        init = tf.constant_initializer(
            value=init, dtype=self.dtype, verify_shape=True)
    kwargs['initializer'] = init
    kwargs['dtype'] = self.dtype
    kwargs['trainable'] = self.trainable
    return kwargs
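The conversion idiom above, raw values become constant initializers while Initializer objects are left alone, also works standalone. A minimal sketch, assuming TensorFlow 1.x; the as_initializer helper is hypothetical:

import tensorflow as tf
from tensorflow.python.ops.init_ops import Initializer


def as_initializer(init, dtype=tf.float32):
    # Hypothetical helper: wrap anything that is not already an Initializer.
    if init is not None and not isinstance(init, Initializer):
        init = tf.constant_initializer(value=init, dtype=dtype, verify_shape=True)
    return init


w = tf.get_variable(
    "w", shape=[2, 2],
    initializer=as_initializer([[1.0, 0.0], [0.0, 1.0]]))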
Example #2
Source File: ops.py From auDeep with GNU General Public License v3.0
def linear(input: tf.Tensor,
           output_size: int,
           weight_initializer: Optional[Initializer] = None,
           bias_initializer: Optional[Initializer] = None,
           name: str = "linear") -> tf.Tensor:
    """
    Apply a linear transformation to a tensor.

    Parameters
    ----------
    input: tf.Tensor
        The tensor which should be linearly transformed
    output_size: int
        The desired output size of the linear transformation
    weight_initializer: tf.Initializer, optional
        A custom initializer for the weight matrix of the linear transformation
    bias_initializer: tf.Initializer, optional
        A custom initializer for the bias vector of the linear transformation
    name: str, optional
        A name for the operation (default "linear")

    Returns
    -------
    tf.Tensor
        The linearly transformed input tensor
    """
    shape = input.get_shape().as_list()

    with tf.variable_scope(name):
        weights = tf.get_variable(name="weights",
                                  shape=[shape[-1], output_size],
                                  dtype=tf.float32,
                                  initializer=weight_initializer)
        bias = tf.get_variable(name="bias",
                               shape=[output_size],
                               initializer=bias_initializer)

        return tf.matmul(input, weights) + bias
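A hypothetical usage sketch, assuming TensorFlow 1.x graph mode, passing a stock initializer through the weight_initializer parameter:

x = tf.placeholder(tf.float32, shape=[None, 128])
y = linear(x, output_size=64,
           weight_initializer=tf.truncated_normal_initializer(stddev=0.02))
# y has shape [None, 64]; the bias falls back to tf.get_variable's default.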
Example #3
Source File: ops.py From auDeep with GNU General Public License v3.0
def time_distributed_linear(inputs: tf.Tensor,
                            output_size: int,
                            weight_initializer: Optional[Initializer] = None,
                            bias_initializer: Optional[Initializer] = None,
                            name: str = "time_dist_linear") -> tf.Tensor:
    """
    Applies the same linear transformation to all time steps of a sequence.

    Parameters
    ----------
    inputs: tf.Tensor
        The input sequences, of shape [max_time, batch_size, num_features]
    output_size: int
        The desired number of features in the output sequences
    weight_initializer: tf.Initializer, optional
        A custom initializer for the weight matrix of the linear transformation
    bias_initializer: tf.Initializer, optional
        A custom initializer for the bias vector of the linear transformation
    name: str, optional
        A name for the operation (default "time_dist_linear")

    Returns
    -------
    tf.Tensor
        The linearly transformed input sequences, of shape [max_time, batch_size, output_size]
    """
    max_time, batch_size, _ = tf.unstack(tf.shape(inputs))
    static_shape = inputs.shape.as_list()

    with tf.variable_scope(name):
        result = flatten_time(inputs)
        result = linear(result,
                        output_size=output_size,
                        weight_initializer=weight_initializer,
                        bias_initializer=bias_initializer)
        result = restore_time(result, max_time, batch_size, output_size)
        result.set_shape([static_shape[0], static_shape[1], output_size])

        return result
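A hypothetical usage sketch (TensorFlow 1.x assumed), projecting a time-major batch of 128-feature sequences down to 32 features per step; flatten_time and restore_time are helpers from the same auDeep module:

sequences = tf.placeholder(tf.float32, shape=[None, None, 128])  # [max_time, batch_size, 128]
projected = time_distributed_linear(sequences, output_size=32)   # [max_time, batch_size, 32]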
Example #4
Source File: variable_scope.py From lambda-packs with MIT License
def get_variable(self, var_store, name, shape=None, dtype=None,
                 initializer=None, regularizer=None, reuse=None,
                 trainable=True, collections=None, caching_device=None,
                 partitioner=None, validate_shape=True, use_resource=None,
                 custom_getter=None):
    """Gets an existing variable with this name or create a new one."""
    if regularizer is None:
        regularizer = self._regularizer
    if caching_device is None:
        caching_device = self._caching_device
    if partitioner is None:
        partitioner = self._partitioner
    if custom_getter is None:
        custom_getter = self._custom_getter
    if reuse is None:
        reuse = self._reuse

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
        # Check that `initializer` dtype and `dtype` are consistent before
        # replacing them with defaults.
        if (dtype is not None and initializer is not None and
                not callable(initializer)):
            init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
            if init_dtype != dtype:
                raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                                 "don't match." % (init_dtype, dtype))
        if initializer is None:
            initializer = self._initializer
        if dtype is None:
            dtype = self._dtype
        if use_resource is None:
            use_resource = self._use_resource

        return var_store.get_variable(
            full_name, shape=shape, dtype=dtype, initializer=initializer,
            regularizer=regularizer, reuse=reuse, trainable=trainable,
            collections=collections, caching_device=caching_device,
            partitioner=partitioner, validate_shape=validate_shape,
            use_resource=use_resource, custom_getter=custom_getter)
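The dtype consistency check in this example is observable through the public tf.get_variable API. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

# Consistent: the constant initializer is float32 and so is the explicit dtype.
ok = tf.get_variable("ok", initializer=tf.constant(1.0), dtype=tf.float32)

# Inconsistent: a non-callable float32 initializer with an explicit int32 dtype
# raises ValueError("Initializer type ... and explicit dtype ... don't match.").
# bad = tf.get_variable("bad", initializer=tf.constant(1.0), dtype=tf.int32)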
Example #5
Source File: ops.py From auDeep with GNU General Public License v3.0
def conv2d(input: tf.Tensor,
           output_dim: int,
           kernel_width: int = 5,
           kernel_height: int = 5,
           horizontal_stride: int = 2,
           vertical_stride: int = 2,
           weight_initializer: Optional[Initializer] = None,
           bias_initializer: Optional[Initializer] = None,
           name: str = "conv2d"):
    """
    Apply a 2D-convolution to a tensor.

    Parameters
    ----------
    input: tf.Tensor
        The tensor to which the convolution should be applied. Must be of shape
        [batch_size, height, width, channels]
    output_dim: int
        The number of convolutional filters
    kernel_width: int, optional
        The width of the convolutional filters (default 5)
    kernel_height: int, optional
        The height of the convolutional filters (default 5)
    horizontal_stride: int, optional
        The horizontal stride of the convolutional filters (default 2)
    vertical_stride: int, optional
        The vertical stride of the convolutional filters (default 2)
    weight_initializer: tf.Initializer, optional
        A custom initializer for the weight matrices of the filters
    bias_initializer: tf.Initializer, optional
        A custom initializer for the bias vectors of the filters
    name: str, optional
        A name for the operation (default "conv2d")

    Returns
    -------
    tf.Tensor
        The result of applying a 2D-convolution to the input tensor.
    """
    shape = input.get_shape().as_list()

    with tf.variable_scope(name):
        weights = tf.get_variable(name="weights",
                                  shape=[kernel_height, kernel_width, shape[-1], output_dim],
                                  initializer=weight_initializer)
        bias = tf.get_variable(name="bias",
                               shape=[output_dim],
                               initializer=bias_initializer)

        conv = tf.nn.conv2d(input,
                            filter=weights,
                            strides=[1, vertical_stride, horizontal_stride, 1],
                            padding='SAME')
        conv = tf.nn.bias_add(conv, bias)

        return conv
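A hypothetical usage sketch (TensorFlow 1.x assumed): with the default 5x5 kernels, stride 2, and 'SAME' padding, each spatial dimension is halved.

images = tf.placeholder(tf.float32, shape=[None, 64, 64, 1])
features = conv2d(images, output_dim=16,
                  weight_initializer=tf.truncated_normal_initializer(stddev=0.02))
# features has shape [None, 32, 32, 16]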
Example #6
Source File: ops.py From auDeep with GNU General Public License v3.0
def deconv2d(input: tf.Tensor,
             output_shape: Sequence[Union[int, tf.Tensor]],
             kernel_width: int = 5,
             kernel_height: int = 5,
             horizontal_stride: int = 2,
             vertical_stride: int = 2,
             weight_initializer: Optional[Initializer] = None,
             bias_initializer: Optional[Initializer] = None,
             name: str = "deconv2d"):
    """
    Applies a 2D-deconvolution to a tensor.

    Parameters
    ----------
    input: tf.Tensor
        The tensor to which a 2D-deconvolution should be applied. Must be of shape
        [batch_size, height, width, channels]
    output_shape: list of int or tf.Tensor
        The desired output shape.
    kernel_width: int, optional
        The width of the convolutional filters (default 5)
    kernel_height: int, optional
        The height of the convolutional filters (default 5)
    horizontal_stride: int, optional
        The horizontal stride of the convolutional filters (default 2)
    vertical_stride: int, optional
        The vertical stride of the convolutional filters (default 2)
    weight_initializer: tf.Initializer, optional
        A custom initializer for the weight matrices of the filters
    bias_initializer: tf.Initializer, optional
        A custom initializer for the bias vectors of the filters
    name: str, optional
        A name for the operation (default "deconv2d")

    Returns
    -------
    tf.Tensor
        The result of applying a 2D-deconvolution to the input tensor
    """
    shape = input.get_shape().as_list()

    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        weights = tf.get_variable(name="weights",
                                  shape=[kernel_height, kernel_width, output_shape[-1], shape[-1]],
                                  initializer=weight_initializer)
        biases = tf.get_variable(name="bias",
                                 shape=[output_shape[-1]],
                                 initializer=bias_initializer)

        deconv = tf.nn.conv2d_transpose(input,
                                        filter=weights,
                                        output_shape=output_shape,
                                        strides=[1, vertical_stride, horizontal_stride, 1])
        deconv = tf.nn.bias_add(deconv, biases)
        deconv.set_shape([None] + output_shape[1:])

        return deconv
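A hypothetical usage sketch (TensorFlow 1.x assumed) that mirrors the conv2d example above: the dynamic batch size goes into output_shape as a tensor, while the spatial and channel dimensions stay static so the set_shape call can recover them.

codes = tf.placeholder(tf.float32, shape=[None, 32, 32, 16])
batch_size = tf.shape(codes)[0]
upsampled = deconv2d(codes, output_shape=[batch_size, 64, 64, 1])
# upsampled has static shape [None, 64, 64, 1]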
Example #7
Source File: variable_scope.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def get_variable(self, var_store, name, shape=None, dtype=None,
                 initializer=None, regularizer=None, reuse=None,
                 trainable=True, collections=None, caching_device=None,
                 partitioner=None, validate_shape=True, use_resource=None,
                 custom_getter=None, constraint=None):
    """Gets an existing variable with this name or create a new one."""
    if regularizer is None:
        regularizer = self._regularizer
    if caching_device is None:
        caching_device = self._caching_device
    if partitioner is None:
        partitioner = self._partitioner
    if custom_getter is None:
        custom_getter = self._custom_getter
    if context.in_graph_mode():
        if reuse is None:
            reuse = self._reuse
        if use_resource is None:
            use_resource = self._use_resource
    else:
        reuse = AUTO_REUSE
        use_resource = True

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
        # Check that `initializer` dtype and `dtype` are consistent before
        # replacing them with defaults.
        if (dtype is not None and initializer is not None and
                not callable(initializer)):
            init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
            if init_dtype != dtype:
                raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                                 "don't match." % (init_dtype, dtype))
        if initializer is None:
            initializer = self._initializer
        if constraint is None:
            constraint = self._constraint
        if dtype is None:
            dtype = self._dtype

        return var_store.get_variable(
            full_name, shape=shape, dtype=dtype, initializer=initializer,
            regularizer=regularizer, reuse=reuse, trainable=trainable,
            collections=collections, caching_device=caching_device,
            partitioner=partitioner, validate_shape=validate_shape,
            use_resource=use_resource, custom_getter=custom_getter,
            constraint=constraint)
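Unlike Example #4, this version forwards a constraint argument: a projection function applied to the variable after optimizer updates. A minimal sketch of its use through the public API, assuming TensorFlow 1.4 or later:

import tensorflow as tf

# Hypothetical constraint: keep the weights inside [-1, 1] after each update.
clip = lambda t: tf.clip_by_value(t, -1.0, 1.0)
w = tf.get_variable("w", shape=[4, 4], constraint=clip)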