Python tensorflow.assert_rank() Examples
The following are 26 code examples of tensorflow.assert_rank(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
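Before the examples, a minimal sketch of how tf.assert_rank() behaves (assuming TensorFlow 1.x graph mode, which all of the examples below use). When a tensor's rank is known statically, a mismatch raises ValueError as soon as the op is built. When the rank is only known at run time, the returned assert op must actually execute, typically by wiring it in with tf.control_dependencies, for the check to fire.

import tensorflow as tf  # assumes TensorFlow 1.x

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # static rank 2

# Static check: the rank is known at graph-construction time,
# so a mismatch raises ValueError immediately (no session needed).
try:
    tf.assert_rank(x, 3)
except ValueError as e:
    print("static rank check failed:", e)

# Dynamic check: the placeholder's rank is unknown until feed time,
# so the assert op must run together with the computation.
p = tf.placeholder(tf.float32)
assert_op = tf.assert_rank(p, 2, message="p must be a matrix")
with tf.control_dependencies([assert_op]):
    y = tf.identity(p)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={p: [[1.0, 2.0]]}))  # passes
    # feeding a 1-D value instead would raise InvalidArgumentError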
Example #1
Source File: utils.py From zhusuan with MIT License | 6 votes |
def assert_scalar(tensor, name):
    """
    Whether the `tensor` is a scalar (0-D tensor).

    :param tensor: A Tensor to be checked.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = name + " should be a scalar (0-D tensor)."
    if static_shape and (static_shape.ndims >= 1):
        raise ValueError(shape_err_msg)
    else:
        _assert_shape_op = tf.assert_rank(tensor, 0, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
        return tensor
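A hypothetical usage of assert_scalar, showing both paths: a tensor with a known static shape fails immediately with ValueError, while a shape-unknown tensor gets the runtime assert attached instead.

temperature = tf.constant(3.0)                   # 0-D, passes the static check
checked = assert_scalar(temperature, "temperature")

matrix = tf.constant([[1.0, 2.0]])               # 2-D, static shape known
# assert_scalar(matrix, "temperature")           # raises ValueError here

unknown = tf.placeholder(tf.float32)             # shape unknown: the runtime
checked = assert_scalar(unknown, "temperature")  # assert_rank op is attached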
Example #2
Source File: vocab_utils.py From inference with Apache License 2.0 | 6 votes |
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to sequence of bytes.

    Args:
        tokens: A tf.string tensor

    Returns:
        A tensor of shape words.shape + [bytes_per_word] containing byte
        versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)

        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False)
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
Example #3
Source File: diagonal_gaussian.py From ProMP with MIT License | 6 votes |
def log_likelihood_sym(self, x_var, dist_info_vars):
    """
    Symbolic log likelihood log p(x) of the distribution

    Args:
        x_var (tf.Tensor): variable where to evaluate the log likelihood
        dist_info_vars (dict) : dict of distribution parameters as tf.Tensor

    Returns:
        (tf.Tensor): symbolic log likelihood
    """
    means = dist_info_vars["mean"]
    log_stds = dist_info_vars["log_std"]

    # assert ranks (static checks: these raise at graph-construction
    # time when the ranks are known and wrong)
    tf.assert_rank(x_var, 2), tf.assert_rank(means, 2), tf.assert_rank(log_stds, 2)

    zs = (x_var - means) / tf.exp(log_stds)
    return - tf.reduce_sum(log_stds, reduction_indices=-1) - \
        0.5 * tf.reduce_sum(tf.square(zs), reduction_indices=-1) - \
        0.5 * self.dim * np.log(2 * np.pi)
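One caveat with the bare tf.assert_rank calls above: in graph mode they only enforce anything when the ranks are statically known, because the returned assert ops are never executed. A sketch of the stricter pattern, under the same variable names:

# Stricter variant: force the rank checks to run with the computation,
# so rank violations also surface for tensors of unknown static rank.
assert_ops = [tf.assert_rank(x_var, 2),
              tf.assert_rank(means, 2),
              tf.assert_rank(log_stds, 2)]
with tf.control_dependencies(assert_ops):
    zs = (x_var - means) / tf.exp(log_stds)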
Example #4
Source File: vocab_utils.py From nmt with Apache License 2.0 | 6 votes |
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to sequence of bytes.

    Args:
        tokens: A tf.string tensor

    Returns:
        A tensor of shape words.shape + [bytes_per_word] containing byte
        versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)

        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False)
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
Example #5
Source File: layers.py From tf_autoencoder with Apache License 2.0 | 6 votes |
def fc_encoder(inputs, hidden_units, dropout, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'encoder', [inputs]):
        tf.assert_rank(inputs, 2)
        for layer_id, num_hidden_units in enumerate(hidden_units):
            with tf.variable_scope(
                    'layer_{}'.format(layer_id),
                    values=(net,)) as layer_scope:
                net = tf.contrib.layers.fully_connected(
                    net,
                    num_outputs=num_hidden_units,
                    scope=layer_scope)
                if dropout is not None:
                    net = slim.dropout(net)
                add_hidden_layer_summary(net)
        net = tf.identity(net, name='output')
    return net
Example #6
Source File: vocab_utils.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to sequence of bytes.

    Args:
        tokens: A tf.string tensor

    Returns:
        A tensor of shape words.shape + [bytes_per_word] containing byte
        versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)

        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False)
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
Example #7
Source File: vocab_utils.py From nlp-architect with Apache License 2.0 | 6 votes |
def tokens_to_bytes(tokens):
    """Given a sequence of strings, map to sequence of bytes.

    Args:
        tokens: A tf.string tensor

    Returns:
        A tensor of shape words.shape + [bytes_per_word] containing byte
        versions of each word.
    """
    bytes_per_word = DEFAULT_CHAR_MAXLEN
    with tf.device("/cpu:0"):
        tf.assert_rank(tokens, 1)
        shape = tf.shape(tokens)

        tf.logging.info(tokens)
        tokens_flat = tf.reshape(tokens, [-1])
        as_bytes_flat = tf.map_fn(
            fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word),
            elems=tokens_flat,
            dtype=tf.int32,
            back_prop=False)
        tf.logging.info(as_bytes_flat)
        as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word])
    return as_bytes
Example #8
Source File: layers.py From tf_autoencoder with Apache License 2.0 | 5 votes |
def conv_encoder(inputs, num_filters, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'encoder', [inputs]):
        tf.assert_rank(inputs, 4)
        for layer_id, num_outputs in enumerate(num_filters):
            with tf.variable_scope('block{}'.format(layer_id)):
                net = slim.repeat(net, 2, conv2d_fixed_padding,
                                  num_outputs=num_outputs)
                # kernel_size is required by max_pool2d; 2 is assumed here,
                # since the snippet as extracted omitted the argument
                net = tf.contrib.layers.max_pool2d(net, kernel_size=2)
        net = tf.identity(net, name='output')
    return net
Example #9
Source File: layers.py From tf_autoencoder with Apache License 2.0 | 5 votes |
def conv_decoder(inputs, num_filters, output_shape, scope=None):
    net = inputs
    with tf.variable_scope(scope, 'decoder', [inputs]):
        tf.assert_rank(inputs, 4)
        for layer_id, units in enumerate(num_filters):
            with tf.variable_scope('block_{}'.format(layer_id), values=(net,)):
                # kernel_size is required by conv2d_transpose; 3 is assumed
                # here, since the snippet as extracted omitted the argument
                net = tf.contrib.layers.conv2d_transpose(
                    net, units, kernel_size=3, stride=2)
                add_hidden_layer_summary(net)

        with tf.variable_scope('linear', values=(net,)):
            net = tf.contrib.layers.conv2d_transpose(
                net, 1, kernel_size=3, activation_fn=None)
            tf.summary.histogram('activation', net)

        with tf.name_scope('crop', values=[net]):
            shape = net.get_shape().as_list()
            assert len(shape) == len(output_shape), 'shape mismatch'
            slice_beg = [0]
            slice_size = [-1]
            for sin, sout in zip(shape[1:], output_shape[1:]):
                if sin == sout:
                    slice_beg.append(0)
                    slice_size.append(-1)
                else:
                    assert sin > sout, "{} <= {}".format(sin, sout)
                    beg = (sin - sout) // 2
                    slice_beg.append(beg)
                    slice_size.append(sout)
            net = tf.slice(net, slice_beg, slice_size)

        net = tf.identity(net, name='output')
    return net
Example #10
Source File: ops.py From sText2Image with MIT License | 5 votes |
def kl_divergence(p, q):
    tf.assert_rank(p, 2)
    tf.assert_rank(q, 2)

    p_shape = tf.shape(p)
    q_shape = tf.shape(q)
    tf.assert_equal(p_shape, q_shape)

    # normalize sum to 1
    p_ = tf.divide(p, tf.tile(tf.expand_dims(tf.reduce_sum(p, axis=1), 1),
                              [1, p_shape[1]]))
    q_ = tf.divide(q, tf.tile(tf.expand_dims(tf.reduce_sum(q, axis=1), 1),
                              [1, p_shape[1]]))

    return tf.reduce_sum(tf.multiply(p_, tf.log(tf.divide(p_, q_))), axis=1)
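A quick sanity check with hypothetical values (TensorFlow 1.x session assumed): identical rows give zero divergence.

p = tf.constant([[0.2, 0.8],
                 [0.5, 0.5]])
q = tf.constant([[0.2, 0.8],
                 [0.9, 0.1]])

with tf.Session() as sess:
    print(sess.run(kl_divergence(p, q)))
    # first entry ~0.0 (identical rows), second entry ~0.51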
Example #11
Source File: model.py From class-balanced-loss with MIT License | 5 votes |
def separable_conv(x, filters, kernel_size, activation):
    """Apply a depthwise separable 1d convolution."""
    tf.assert_rank(x, 3)
    net = tf.expand_dims(x, 2)
    net = tf.layers.separable_conv2d(
        net,
        filters=filters,
        kernel_size=(kernel_size, 1),
        padding='same',
        activation=activation)
    net = tf.squeeze(net, axis=2)
    return net
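The rank-3 assert guards the expand/squeeze trick: a (batch, length, channels) input becomes (batch, length, 1, channels), so a 2-D separable convolution with kernel (kernel_size, 1) acts as a 1-D convolution. A hypothetical shape check:

x = tf.placeholder(tf.float32, [8, 100, 32])  # (batch, length, channels)
y = separable_conv(x, filters=64, kernel_size=5, activation=tf.nn.relu)
print(y.get_shape().as_list())                # [8, 100, 64]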
Example #12
Source File: diagonal_gaussian.py From ProMP with MIT License | 5 votes |
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
    """
    Computes the symbolic representation of the KL divergence of two
    multivariate Gaussian distributions with diagonal covariance matrices

    Args:
        old_dist_info_vars (dict) : dict of old distribution parameters as tf.Tensor
        new_dist_info_vars (dict) : dict of new distribution parameters as tf.Tensor

    Returns:
        (tf.Tensor) : Symbolic representation of kl divergence (tensorflow op)
    """
    old_means = old_dist_info_vars["mean"]
    old_log_stds = old_dist_info_vars["log_std"]
    new_means = new_dist_info_vars["mean"]
    new_log_stds = new_dist_info_vars["log_std"]

    # assert ranks
    tf.assert_rank(old_means, 2), tf.assert_rank(old_log_stds, 2)
    tf.assert_rank(new_means, 2), tf.assert_rank(new_log_stds, 2)

    old_std = tf.exp(old_log_stds)
    new_std = tf.exp(new_log_stds)

    numerator = tf.square(old_means - new_means) + \
        tf.square(old_std) - tf.square(new_std)
    denominator = 2 * tf.square(new_std) + 1e-8
    return tf.reduce_sum(
        numerator / denominator + new_log_stds - old_log_stds,
        reduction_indices=-1)
Example #13
Source File: dice_maml.py From ProMP with MIT License | 5 votes |
def magic_box(logprobs):
    """
    DiCE magic box operator

    Args:
        logprobs: 2d tensor of log probabilities (batch_size, max_path_length)

    Returns:
        tf.Tensor of shape (batch_size, max_path_length): DiCE magic box operator
    """
    tf.assert_rank(logprobs, 2)
    with tf.variable_scope("magic_box"):
        tau = tf.cumsum(logprobs, axis=1)
        magic_box = tf.exp(tau - tf.stop_gradient(tau))
    return magic_box
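The defining DiCE property is easy to verify: tau - stop_gradient(tau) is identically zero in the forward pass, so the operator evaluates to 1 everywhere while gradients still flow through the cumulative sum. A quick check (TensorFlow 1.x session assumed):

logprobs = tf.constant([[-0.5, -1.0, -0.2]])
box = magic_box(logprobs)
grad = tf.gradients(tf.reduce_sum(box), logprobs)[0]

with tf.Session() as sess:
    b, g = sess.run([box, grad])
    print(b)  # [[1. 1. 1.]]: the forward value is always 1
    print(g)  # [[3. 2. 1.]]: gradients flow through the cumsum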
Example #14
Source File: project_tests.py From semantic_segmentation with GNU General Public License v3.0 | 5 votes |
def _assert_tensor_shape(tensor, shape, display_name):
    # tf.assert_rank raises ValueError at graph-construction time when the
    # static rank is known and wrong; the returned op itself is always truthy,
    # so the `assert` below never fails on its own.
    assert tf.assert_rank(tensor, len(shape),
                          message='{} has wrong rank'.format(display_name))

    tensor_shape = tensor.get_shape().as_list() if len(shape) else []

    wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
                       if cor_dim is not None and ten_dim != cor_dim]
    assert not wrong_dimension, \
        '{} has wrong shape.  Found {}'.format(display_name, tensor_shape)
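To see why the static check does the work here (hypothetical tensors, TensorFlow 1.x assumed):

logits = tf.placeholder(tf.float32, shape=[None, 10])
_assert_tensor_shape(logits, [None, 10], 'logits')   # passes

bad = tf.placeholder(tf.float32, shape=[None, 10, 3])
# _assert_tensor_shape(bad, [None, 10], 'logits')
# raises ValueError from tf.assert_rank at graph-construction time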
Example #15
Source File: utils.py From zhusuan with MIT License | 5 votes |
def assert_positive_int32_scalar(value, name):
    """
    Whether `value` is an integer (or 0-D `tf.int32` tensor) and positive.
    If `value` is an instance of a built-in type, it will be checked
    directly. Otherwise, it will be converted to a `tf.int32` tensor and
    checked.

    :param value: The value to be checked.
    :param name: The name of `value` used in error message.
    :return: The checked value.
    """
    if isinstance(value, (int, float)):
        if isinstance(value, int) and value > 0:
            return value
        elif isinstance(value, float):
            raise TypeError(name + " must be integer")
        elif value <= 0:
            raise ValueError(name + " must be positive")
    else:
        try:
            tensor = tf.convert_to_tensor(value, tf.int32)
        except (TypeError, ValueError):
            raise TypeError(name + ' must be (convertible to) tf.int32')
        _assert_rank_op = tf.assert_rank(
            tensor, 0,
            message=name + " should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            tensor, tf.constant(0, tf.int32),
            message=name + " must be positive")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            tensor = tf.identity(tensor)
        return tensor
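A hypothetical usage showing the three paths:

n = assert_positive_int32_scalar(5, "n_samples")   # plain int: returned as-is
# assert_positive_int32_scalar(0, "n_samples")     # ValueError: n_samples must be positive
# assert_positive_int32_scalar(2.5, "n_samples")   # TypeError: n_samples must be integer

n_t = assert_positive_int32_scalar(tf.constant(5), "n_samples")
# tensor path: rank and positivity asserts are attached via control_dependencies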
Example #16
Source File: ops.py From DeepHDR with MIT License | 5 votes |
def kl_divergence(p, q):
    tf.assert_rank(p, 2)
    tf.assert_rank(q, 2)

    p_shape = tf.shape(p)
    q_shape = tf.shape(q)
    tf.assert_equal(p_shape, q_shape)

    # normalize sum to 1
    p_ = tf.divide(p, tf.tile(tf.expand_dims(tf.reduce_sum(p, axis=1), 1),
                              [1, p_shape[1]]))
    q_ = tf.divide(q, tf.tile(tf.expand_dims(tf.reduce_sum(q, axis=1), 1),
                              [1, p_shape[1]]))

    return tf.reduce_sum(tf.multiply(p_, tf.log(tf.divide(p_, q_))), axis=1)
Example #17
Source File: check_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def test_raises_if_rank_is_not_integer_dynamic(self):
    with self.test_session():
        tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
        rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
        with self.assertRaisesRegexp(TypeError,
                                     "must be of type <dtype: 'int32'>"):
            with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
                tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
Example #18
Source File: check_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def test_raises_if_rank_is_not_integer_static(self):
    with self.test_session():
        tensor = tf.constant([1, 2], name="my_tensor")
        with self.assertRaisesRegexp(TypeError,
                                     "must be of type <dtype: 'int32'>"):
            tf.assert_rank(tensor, .5)
Example #19
Source File: check_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def test_raises_if_rank_is_not_scalar_static(self):
    with self.test_session():
        tensor = tf.constant([1, 2], name="my_tensor")
        with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
            tf.assert_rank(tensor, np.array([], dtype=np.int32))
Example #20
Source File: check_ops_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
    with self.test_session():
        tensor = tf.constant([1, 2], name="my_tensor")
        desired_rank = 2
        with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
            with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
                tf.identity(tensor).eval()
Example #21
Source File: univariate.py From zhusuan with MIT License | 5 votes |
def __init__(self, logits, n_experiments, dtype=tf.int32, group_ndims=0,
             check_numerics=False, **kwargs):
    self._logits = tf.convert_to_tensor(logits)
    param_dtype = assert_same_float_dtype(
        [(self._logits, 'Binomial.logits')])

    assert_dtype_is_int_or_float(dtype)

    sign_err_msg = "n_experiments must be positive"
    if isinstance(n_experiments, int):
        if n_experiments <= 0:
            raise ValueError(sign_err_msg)
        self._n_experiments = n_experiments
    else:
        try:
            n_experiments = tf.convert_to_tensor(n_experiments, tf.int32)
        except ValueError:
            raise TypeError('n_experiments must be int32')
        _assert_rank_op = tf.assert_rank(
            n_experiments, 0,
            message="n_experiments should be a scalar (0-D Tensor).")
        _assert_positive_op = tf.assert_greater(
            n_experiments, 0, message=sign_err_msg)
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_positive_op]):
            self._n_experiments = tf.identity(n_experiments)

    self._check_numerics = check_numerics
    super(Binomial, self).__init__(
        dtype=dtype,
        param_dtype=param_dtype,
        is_continuous=False,
        is_reparameterized=False,
        group_ndims=group_ndims,
        **kwargs)
Example #22
Source File: base.py From zhusuan with MIT License | 5 votes |
def __init__(self, dtype, param_dtype, is_continuous,
             is_reparameterized, use_path_derivative=False,
             group_ndims=0, **kwargs):
    if 'group_event_ndims' in kwargs:
        raise ValueError(
            "The argument `group_event_ndims` has been deprecated. "
            "Please use `group_ndims` instead.")

    self._dtype = dtype
    self._param_dtype = param_dtype
    self._is_continuous = is_continuous
    self._is_reparameterized = is_reparameterized
    self._use_path_derivative = use_path_derivative
    if isinstance(group_ndims, int):
        if group_ndims < 0:
            raise ValueError("group_ndims must be non-negative.")
        self._group_ndims = group_ndims
    else:
        group_ndims = tf.convert_to_tensor(group_ndims, tf.int32)
        _assert_rank_op = tf.assert_rank(
            group_ndims, 0,
            message="group_ndims should be a scalar (0-D Tensor).")
        _assert_nonnegative_op = tf.assert_greater_equal(
            group_ndims, 0,
            message="group_ndims must be non-negative.")
        with tf.control_dependencies([_assert_rank_op,
                                      _assert_nonnegative_op]):
            self._group_ndims = tf.identity(group_ndims)
Example #23
Source File: base.py From zhusuan with MIT License | 5 votes |
def sample(self, n_samples=None):
    """
    sample(n_samples=None)

    Return samples from the distribution. When `n_samples` is None (by
    default), one sample of shape ``batch_shape + value_shape`` is
    generated. For a scalar `n_samples`, the returned Tensor has a new
    sample dimension with size `n_samples` inserted at ``axis=0``, i.e.,
    the shape of samples is ``[n_samples] + batch_shape + value_shape``.

    :param n_samples: A 0-D `int32` Tensor or None. How many independent
        samples to draw from the distribution.
    :return: A Tensor of samples.
    """
    if n_samples is None:
        samples = self._sample(n_samples=1)
        return tf.squeeze(samples, axis=0)
    elif isinstance(n_samples, int):
        return self._sample(n_samples)
    else:
        n_samples = tf.convert_to_tensor(n_samples, dtype=tf.int32)
        _assert_rank_op = tf.assert_rank(
            n_samples, 0,
            message="n_samples should be a scalar (0-D Tensor).")
        with tf.control_dependencies([_assert_rank_op]):
            samples = self._sample(n_samples)
        return samples
Example #24
Source File: embracenet.py From embracenet with MIT License | 5 votes |
def add_modality(self, input_data, input_size, bypass_docking=False):
    """
    Add a modality to EmbraceNet.

    Args:
        input_data: An input data to feed into EmbraceNet. Must be a 2-D
            tensor of shape [batch_size, input_size].
        input_size: The second dimension of input_data.
        bypass_docking: Bypass docking step, i.e., connect the input data
            directly to the embracement layer. If True, input_data must
            have a shape of [batch_size, embracement_size].
    """

    # check input data
    tf_assertions = []
    tf_assertions.append(tf.assert_rank(input_data, 2))
    tf_assertions.append(tf.assert_equal(tf.shape(input_data)[0],
                                         self.batch_size))
    with tf.control_dependencies(tf_assertions):
        input_data = tf.identity(input_data)

    with tf.variable_scope('embracenet'):
        # construct docking layer
        modality_index = len(self.graph.modalities)
        modality_graph = EmbraceNetObject()
        modality_feeds = EmbraceNetObject()

        with tf.variable_scope('docking/%d' % modality_index):
            docking_input = input_data
            if bypass_docking:
                modality_graph.docking_output = docking_input
            else:
                docking_output = tf.layers.dense(
                    docking_input, units=self.embracement_size,
                    kernel_initializer=None, bias_initializer=None)
                docking_output = tf.nn.relu(docking_output)
                modality_graph.docking_output = docking_output

        # finalize
        self.graph.modalities.append(modality_graph)
        self.feeds.modalities.append(modality_feeds)
Example #25
Source File: model.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def separable_conv(x, filters, kernel_size, activation):
    """Apply a depthwise separable 1d convolution."""
    tf.assert_rank(x, 3)
    net = tf.expand_dims(x, 2)
    net = tf.layers.separable_conv2d(
        net,
        filters=filters,
        kernel_size=(kernel_size, 1),
        padding='same',
        activation=activation)
    net = tf.squeeze(net, axis=2)
    return net
Example #26
Source File: project_tests.py From CarND-Semantic-Segmentation with MIT License | 5 votes |
def _assert_tensor_shape(tensor, shape, display_name):
    """
    Check whether the tensor and another shape match in shape

    :param tensor: TF Tensor
    :param shape: Some array
    :param display_name: Name of tensor to print if assertions fail
    """
    assert tf.assert_rank(tensor, len(shape),
                          message='{} has wrong rank'.format(display_name))

    tensor_shape = tensor.get_shape().as_list() if len(shape) else []

    wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
                       if cor_dim is not None and ten_dim != cor_dim]
    assert not wrong_dimension, \
        '{} has wrong shape.  Found {}'.format(display_name, tensor_shape)