Python tensorflow.Dimension() Examples
The following are 30 code examples of tensorflow.Dimension(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
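As a quick primer before the examples: in TensorFlow 1.x, tf.Dimension wraps a single entry of a TensorShape, supports arithmetic, and exposes the underlying int (or None, if unknown) via .value. A minimal sketch, assuming a TF 1.x installation:

import tensorflow as tf  # TF 1.x

known = tf.Dimension(2)
unknown = tf.Dimension(None)

print(known + known)            # Dimension(4): arithmetic works on known dimensions
print((known + unknown).value)  # None: combining with an unknown dimension stays unknown
print(known.value)              # 2: .value unwraps the plain Python int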
Example #1
Source File: control_flow_ops_py_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testWhileShapeInference(self):
    with self.test_session():
        i = tf.constant(0)
        m = tf.ones([2, 2])
        c = lambda i, j: tf.less(i, 2)

        def b(i, j):
            new_i = tf.add(i, 1)
            new_j = tf.concat(0, [j, j])
            return [new_i, new_j]

        r = tf.while_loop(c, b, [i, m],
                          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
        self.assertTrue(r[1].get_shape()[0].value is None)
        self.assertEqual(r[1].get_shape()[1], tf.Dimension(2))

        with self.assertRaisesRegexp(ValueError, "not an invariant for"):
            r = tf.while_loop(c, b, [i, m])
Example #2
Source File: tf_utils.py From GtS with MIT License | 6 votes |
def repeat_2d(x, reps, axis):
    assert(axis == 0 or axis == 1)

    if axis == 1:
        x = tf.transpose(x)

    static_shape = list(x.get_shape())
    dyn_shape = tf.shape(x)
    x_repeat = tf.reshape(tf.tile(x, [1, reps]),
                          (dyn_shape[0] * reps, dyn_shape[1]))
    if static_shape[0].value is not None:
        static_shape[0] = tf.Dimension(static_shape[0].value * reps)
    x_repeat.set_shape(static_shape)

    if axis == 1:
        x_repeat = tf.transpose(x_repeat)

    return x_repeat
Example #3
Source File: utils.py From tf_audio_steganalysis with GNU General Public License v3.0 | 6 votes |
def get_variables_number(trainable_variables):
    """
    calculate the number of trainable variables in the current network
    :param trainable_variables: trainable variables
    :return: total_parameters: the total number of trainable variables
    """
    total_parameters = 0
    for variable in trainable_variables:
        # shape is an array of tf.Dimension
        shapes = variable.get_shape()
        variable_parameters = 1
        for shape in shapes:
            variable_parameters *= shape.value
        total_parameters += variable_parameters

    return total_parameters
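Iterating a TensorShape yields tf.Dimension objects, which is why the loop multiplies .value rather than the dimensions themselves. A hypothetical call site, assuming a graph with trainable variables has already been built:

total = get_variables_number(tf.trainable_variables())
print("trainable parameters: %d" % total)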
Example #4
Source File: tensor_ops.py From hart with GNU General Public License v3.0 | 6 votes |
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask

        bs = x.get_shape()[0]
        if bs != None:  # here type(bs) is tf.Dimension and == is ok
            batch_size = int(bs)

        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions, 2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected
Example #5
Source File: tf_utils.py From nucleus7 with Mozilla Public License 2.0 | 6 votes |
def count_trainable_params(graph: Optional[tf.Graph] = None) -> int:
    """
    Count number of trainable parameters inside of `tf.trainable_variables`

    Parameters
    ----------
    graph
        tensorflow graph

    Returns
    -------
    number_of_parameters
        number of trainable parameters
    """
    graph = graph or tf.get_default_graph()
    total_parameters = 0
    for variable in graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    return total_parameters
Example #6
Source File: gaussian_process.py From BERT with Apache License 2.0 | 6 votes |
def build(self, input_shape=None):
    input_shape = tf.TensorShape(input_shape)
    input_dim = input_shape[-1]
    if isinstance(input_dim, tf.Dimension):
        input_dim = input_dim.value
    self.conditional_inputs = self.add_weight(
        shape=(self.num_inducing, input_dim),
        name='inducing_inputs',
        initializer=self.inducing_inputs_initializer,
        regularizer=self.inducing_inputs_regularizer,
        constraint=self.inducing_inputs_constraint)
    self.conditional_outputs = self.add_weight(
        shape=(self.num_inducing, self.units),
        name='inducing_outputs',
        initializer=self.inducing_outputs_initializer,
        regularizer=self.inducing_outputs_regularizer,
        constraint=self.inducing_outputs_constraint)
    super(SparseGaussianProcess, self).build(input_shape)
Example #7
Source File: train-autoencoder.py From autoencoder with MIT License | 6 votes |
def show_parameter_count(variables):
    """
    Count and print how many parameters there are.
    """
    total_parameters = 0
    for variable in variables:
        name = variable.name
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        print('{}: {} ({} parameters)'.format(name, shape, variable_parameters))
        total_parameters += variable_parameters

    print('Total: {} parameters'.format(total_parameters))
Example #8
Source File: reversible_layers.py From BERT with Apache License 2.0 | 6 votes |
def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    last_dim = input_shape[-1]
    if isinstance(last_dim, tf.Dimension):
        last_dim = last_dim.value
    if last_dim is None:
        raise ValueError('The last dimension of the inputs to `ActNorm` '
                         'should be defined. Found `None`.')
    bias = self.add_weight('bias', [last_dim], dtype=self.dtype)
    log_scale = self.add_weight('log_scale', [last_dim], dtype=self.dtype)
    # Set data-dependent initializers.
    bias = bias.assign(self.bias_initial_value)
    with tf.control_dependencies([bias]):
        self.bias = bias
    log_scale = log_scale.assign(self.log_scale_initial_value)
    with tf.control_dependencies([log_scale]):
        self.log_scale = log_scale
    self.built = True
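The isinstance(last_dim, tf.Dimension) check makes the layer work whether indexing a TensorShape returns a tf.Dimension (TF 1.x graph mode) or a plain int (v2 behavior). Newer TensorFlow releases also ship tf.compat.dimension_value for exactly this unwrapping; a minimal sketch of the equivalent pattern:

# Version-agnostic unwrapping: returns dim.value for a tf.Dimension and
# passes plain ints (or None) through unchanged.
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
    raise ValueError('The last dimension of the inputs should be defined.')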
Example #9
Source File: tools.py From stacked_capsule_autoencoders with Apache License 2.0 | 6 votes |
def clip_gradients(gvs, value_clip=0, norm_clip=0):
    """Clips gradients."""
    grads, vs = zip(*gvs)
    grads = list(grads)

    if value_clip > 0:
        for i, g in enumerate(grads):
            if g is not None:
                grads[i] = tf.clip_by_value(g, -value_clip, value_clip)

    if norm_clip > 0:
        n_params = sum(np.prod(g.shape) for g in grads if g is not None)

        # n_params is most likely tf.Dimension and cannot be converted
        # to float directly
        norm_clip *= np.sqrt(float(int(n_params)))

        grads_to_clip = [(i, g) for i, g in enumerate(grads) if g is not None]
        idx, grads_to_clip = zip(*grads_to_clip)

        clipped_grads = tf.clip_by_global_norm(grads_to_clip, norm_clip)[0]
        for i, g in zip(idx, clipped_grads):
            grads[i] = g

    return [item for item in zip(grads, vs)]
Example #10
Source File: training.py From clinical_concept_extraction with MIT License | 6 votes |
def generate_iterator_ops(filenames, train=True, reuse=False):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(_parse_function)
    if train:
        dataset = dataset.shuffle(buffer_size=2 * FLAGS.batch_size)

    dataset = dataset.padded_batch(
        FLAGS.batch_size,
        ([tf.Dimension(None), tf.Dimension(1024), tf.Dimension(3)],
         [tf.Dimension(None)], [])
    )
    data_iterator = dataset.make_initializable_iterator()
    next_x, next_y, next_l = data_iterator.get_next()

    if train:
        ops = annotation_func_train(next_x, next_y, next_l, train=train, reuse=reuse)
    else:
        ops = annotation_func_test(next_x, next_l, reuse=reuse)

    ops = list(ops)
    ops.append(next_y)
    ops.append(next_l)

    return data_iterator, ops
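In padded_shapes, tf.Dimension(None) means "pad this axis to the longest element in the batch", while a fixed Dimension requires every element to already match. A minimal standalone sketch of that behavior, using hypothetical toy data under TF 1.x:

ds = tf.data.Dataset.from_generator(lambda: [[1], [2, 3], [4, 5, 6]],
                                    output_types=tf.int32)
ds = ds.padded_batch(3, padded_shapes=[tf.Dimension(None)])
# yields a single batch of shape (3, 3); shorter rows are zero-padded on the right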
Example #11
Source File: bayes.py From BERT with Apache License 2.0 | 6 votes |
def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    input_dim = input_shape[-1]
    if isinstance(input_dim, tf.Dimension):
        input_dim = input_dim.value
    self.local_scale = self.add_weight(
        shape=(input_dim,),
        name='local_scale',
        initializer=self.local_scale_initializer,
        regularizer=self.local_scale_regularizer,
        constraint=self.local_scale_constraint)
    self.global_scale = self.add_weight(
        shape=(),
        name='global_scale',
        initializer=self.global_scale_initializer,
        regularizer=self.global_scale_regularizer,
        constraint=self.global_scale_constraint)
    super(DenseHierarchical, self).build(input_shape)
Example #12
Source File: KGAT.py From knowledge_graph_attention_network with MIT License | 5 votes |
def _statistics_params(self):
    # number of params
    total_parameters = 0
    for variable in self.weights.values():
        shape = variable.get_shape()  # shape is an array of tf.Dimension
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    if self.verbose > 0:
        print("#params: %d" % total_parameters)
Example #13
Source File: utils.py From dynamic-training-bench with Mozilla Public License 2.0 | 5 votes |
def count_trainable_parameters(print_model=False):
    """Count the number of trainable parameters in the current graph.
    Returns:
        count: the number of trainable parameters"""
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        if print_model:
            print(variable)
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    return total_parameters
Example #14
Source File: helpers.py From TIES-2.0 with MIT License | 5 votes |
def get_num_parameters(scope=None):
    total_parameters = 0
    for variable in tf.trainable_variables(scope):
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    return total_parameters
Example #15
Source File: tf_utils.py From GtS with MIT License | 5 votes |
def block_diagonal(matrices, dtype=tf.float32):
    """Constructs block-diagonal matrices from a list of batched 2D tensors.

    Args:
        matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
            matrices with the same batch dimension).
        dtype: Data type to use. The Tensors in `matrices` must match this dtype.

    Returns:
        A matrix with the input matrices stacked along its main diagonal, having
        shape [..., \sum_i N_i, \sum_i M_i].
    """
    matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
    blocked_rows = tf.Dimension(0)
    blocked_cols = tf.Dimension(0)
    batch_shape = tf.TensorShape(None)
    for matrix in matrices:
        full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
        batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
        blocked_rows += full_matrix_shape[-2]
        blocked_cols += full_matrix_shape[-1]
    ret_columns_list = []
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        ret_columns_list.append(matrix_shape[-1])
    ret_columns = tf.add_n(ret_columns_list)
    row_blocks = []
    current_column = 0
    for matrix in matrices:
        matrix_shape = tf.shape(matrix)
        row_before_length = current_column
        current_column += matrix_shape[-1]
        row_after_length = ret_columns - current_column
        row_blocks.append(tf.pad(
            tensor=matrix,
            paddings=tf.concat(
                0,
                [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),
                 [(row_before_length, row_after_length)]])))
    blocked = tf.concat(-2, row_blocks)
    blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
    return blocked
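A hypothetical usage sketch, showing how the tf.Dimension accumulators above produce the static output shape:

a = tf.ones([2, 3])
b = tf.ones([4, 5])
blocked = block_diagonal([a, b])
print(blocked.get_shape())  # (6, 8): Dimension(0) + 2 + 4 rows, 0 + 3 + 5 columns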
Example #16
Source File: main_procedure.py From SketchySceneColorization with MIT License | 5 votes |
def print_parameter_count(verbose=False):
    total_parameters = 0
    for variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator'):
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        # print(len(shape))
        variable_parameters = 1
        for dim in shape:
            # print(dim)
            variable_parameters *= dim.value
        if verbose and len(shape) > 1:
            print(shape)
            print(variable_parameters)
        total_parameters += variable_parameters
    print('generator')
    print('total_parameters', total_parameters)

    total_parameters = 0
    for variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator'):
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        # print(len(shape))
        variable_parameters = 1
        for dim in shape:
            # print(dim)
            variable_parameters *= dim.value
        if verbose and len(shape) > 1:
            print(shape)
            print(variable_parameters)
        total_parameters += variable_parameters
    print('discriminator')
    print('total_parameters', total_parameters)
Example #17
Source File: input_source_base.py From ADL with MIT License | 5 votes |
def build_or_reuse_placeholder(tensor_spec):
    """
    Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.

    Args:
        tensor_spec (tf.TensorSpec):

    Returns:
        tf.Tensor:
    """
    g = tfv1.get_default_graph()
    name = tensor_spec.name
    try:
        tensor = g.get_tensor_by_name(name + ':0')
        assert "Placeholder" in tensor.op.type, "Tensor {} exists but is not a placeholder!".format(name)
        assert tensor_spec.is_compatible_with(tensor), \
            "Tensor {} exists but is not compatible with the signature!".format(tensor)
        if tensor.shape.as_list() == tensor_spec.shape.as_list():
            # It might be desirable to use a placeholder of a different shape in some tower
            # (e.g., a less specific shape)
            # Comparing `tensor.shape` directly doesn't work, because
            # tensorflow thinks `tf.Dimension(None)` and `tf.Dimension(None)` are not equal.
            return tensor
    except KeyError:
        pass
    with tfv1.name_scope(None):  # clear any name scope it might get called in
        ret = tfv1.placeholder(
            tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)
    return ret
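The in-code comment points at a real subtlety: under TF 1.x semantics, equality against an unknown dimension is itself unknown, which is why the function compares as_list() instead of the shapes directly. A small demonstration (behavior as of TF 1.x; worth verifying on your version):

print(tf.Dimension(None) == tf.Dimension(None))  # None: unknown == unknown is "unknown", not True
s1, s2 = tf.TensorShape([None, 3]), tf.TensorShape([None, 3])
print(s1.as_list() == s2.as_list())              # True: [None, 3] == [None, 3] as plain Python lists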
Example #18
Source File: utils.py From lang2program with Apache License 2.0 | 5 votes |
def assert_shape(variable, shape):
    """Assert that a TensorFlow Variable has a particular shape.

    Args:
        variable: TF Variable
        shape: a TensorShape, Dimension or tuple
    """
    variable.get_shape().assert_is_compatible_with(shape)
Example #19
Source File: NMF.py From neural_graph_collaborative_filtering with MIT License | 5 votes |
def _statistics_params(self):
    # number of params
    total_parameters = 0
    for variable in self.weights.values():
        shape = variable.get_shape()  # shape is an array of tf.Dimension
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    if self.verbose > 0:
        print("#params: %d" % total_parameters)
Example #20
Source File: dataset.py From XMUNMT with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_inference_input(inputs, params):
    dataset = tf.data.Dataset.from_tensor_slices(
        tf.constant(inputs)
    )

    # Split string
    dataset = dataset.map(lambda x: tf.string_split([x]).values,
                          num_parallel_calls=params.num_threads)

    # Append <eos>
    dataset = dataset.map(
        lambda x: tf.concat([x, [tf.constant(params.eos)]], axis=0),
        num_parallel_calls=params.num_threads
    )

    # Convert tuple to dictionary
    dataset = dataset.map(
        lambda x: {"source": x, "source_length": tf.shape(x)[0]},
        num_parallel_calls=params.num_threads
    )

    dataset = dataset.padded_batch(
        params.decode_batch_size,
        {"source": [tf.Dimension(None)], "source_length": []},
        {"source": params.pad, "source_length": 0}
    )

    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    src_table = tf.contrib.lookup.index_table_from_tensor(
        tf.constant(params.vocabulary["source"]),
        default_value=params.mapping["source"][params.unk]
    )
    features["source"] = src_table.lookup(features["source"])

    return features
Example #21
Source File: utils.py From lang2program with Apache License 2.0 | 5 votes |
def expand_dims_for_broadcast(low_tensor, high_tensor):
    """Expand the dimensions of a lower-rank tensor, so that its rank matches that of a higher-rank tensor.

    This makes it possible to perform broadcast operations between low_tensor and high_tensor.

    Args:
        low_tensor (Tensor): lower-rank Tensor with shape [s_0, ..., s_p]
        high_tensor (Tensor): higher-rank Tensor with shape [s_0, ..., s_p, ..., s_n]

    Note that the shape of low_tensor must be a prefix of the shape of high_tensor.

    Returns:
        Tensor: the lower-rank tensor, but with shape expanded to be [s_0, ..., s_p, 1, 1, ..., 1]
    """
    orig_shape = tf.shape(low_tensor)
    orig_rank = tf.rank(low_tensor)
    target_rank = tf.rank(high_tensor)

    # assert that shapes are compatible
    assert_op = assert_broadcastable(low_tensor, high_tensor)
    with tf.control_dependencies([assert_op]):
        pad_shape = tf.tile([1], [target_rank - orig_rank])
        new_shape = tf.concat(0, [orig_shape, pad_shape])
        result = tf.reshape(low_tensor, new_shape)

    # add static shape information
    high_shape_static = high_tensor.get_shape()
    low_shape_static = low_tensor.get_shape()
    extra_rank = high_shape_static.ndims - low_shape_static.ndims
    result_dims = list(low_shape_static.dims) + [tf.Dimension(1)] * extra_rank
    result_shape = tf.TensorShape(result_dims)
    result.set_shape(result_shape)

    return result
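A hypothetical usage sketch (note that assert_broadcastable is a helper from the same project, so this assumes it is in scope):

low = tf.ones([2, 3])           # shape [2, 3]
high = tf.ones([2, 3, 4, 5])    # shape [2, 3, 4, 5]
expanded = expand_dims_for_broadcast(low, high)  # static shape (2, 3, 1, 1)
result = expanded * high        # now broadcasts across the trailing axes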
Example #22
Source File: CFKG.py From knowledge_graph_attention_network with MIT License | 5 votes |
def _statistics_params(self):
    # number of params
    total_parameters = 0
    for variable in self.weights.values():
        shape = variable.get_shape()  # shape is an array of tf.Dimension
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    if self.verbose > 0:
        print("#params: %d" % total_parameters)
Example #23
Source File: NFM.py From knowledge_graph_attention_network with MIT License | 5 votes |
def _statistics_params(self):
    # number of params
    total_parameters = 0
    for variable in self.weights.values():
        shape = variable.get_shape()  # shape is an array of tf.Dimension
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    if self.verbose > 0:
        print("#params: %d" % total_parameters)
Example #24
Source File: MaSIF_site.py From masif with Apache License 2.0 | 5 votes |
def count_number_parameters(self):
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        print(variable)
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        print(variable_parameters)
        total_parameters += variable_parameters
    print("Total number parameters: %d" % total_parameters)
Example #25
Source File: MaSIF_ppi_search.py From masif with Apache License 2.0 | 5 votes |
def count_number_parameters(self):
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        print(variable)
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        print(variable_parameters)
        total_parameters += variable_parameters
    print("Total number parameters: %d" % total_parameters)
Example #26
Source File: MaSIF_ligand.py From masif with Apache License 2.0 | 5 votes |
def count_number_parameters(self):
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        print(variable)
        # print(shape)
        # print(len(shape))
        variable_parameters = 1
        for dim in shape:
            # print(dim)
            variable_parameters *= dim.value
        print(variable_parameters)
        total_parameters += variable_parameters
    print("Total number parameters: %d" % total_parameters)
Example #27
Source File: utils.py From lang2program with Apache License 2.0 | 5 votes |
def assert_shape(variable, shape):
    """Assert that a TensorFlow Variable has a particular shape.

    Args:
        variable: TF Variable
        shape: a TensorShape, Dimension or tuple
    """
    variable.get_shape().assert_is_compatible_with(shape)
Example #28
Source File: pond.py From tf-encrypted with Apache License 2.0 | 5 votes |
def _avgpool2d_reshape_reduce(x, pool_size: Tuple[int, int], *args):
    """Perform 2D average pooling by the reshape method."""
    del args
    pool_height = tf.Dimension(pool_size[0])
    pool_width = tf.Dimension(pool_size[1])
    n, c, h, w = x.shape

    return (
        x.reshape([n, c, h // pool_height, pool_height, w // pool_width, pool_width])
        .reduce_sum(axis=3)
        .reduce_sum(axis=4)
    )
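For comparison, a plain-TensorFlow sketch of the same reshape trick. This is a hypothetical standalone version (the original operates on tf-encrypted's pond tensors); it assumes NCHW layout with fully-defined static shapes, and reduce_mean folds the summing and the division into one step:

def avgpool2d_reshape(x, pool_h, pool_w):
    # split each spatial axis into (output_size, window_size), then
    # average over each pooling window
    n, c, h, w = x.shape.as_list()
    x = tf.reshape(x, [-1, c, h // pool_h, pool_h, w // pool_w, pool_w])
    return tf.reduce_mean(x, axis=[3, 5])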
Example #29
Source File: seq_batch.py From lang2program with Apache License 2.0 | 5 votes |
def __init__(self, align='left', seq_length=None, dtype=tf.int32, name='FeedSequenceBatch'):
    """Create a Feedable SequenceBatch.

    Args:
        align (str): can be 'left' or 'right'. If 'left', values will be left-aligned, with padding on
            the right. If 'right', values will be right-aligned, with padding on the left. Default is 'left'.
        seq_length (int): the Tensor representing the SequenceBatch will have exactly this many columns.
            Default is None. If None, seq_length will be dynamically determined.
        dtype: data type of the SequenceBatch values array. Defaults to int32.
        name (str): namescope for the Tensors created inside this Model.
    """
    if align not in ('left', 'right'):
        raise ValueError("align must be either 'left' or 'right'.")
    self._align_right = (align == 'right')
    self._seq_length = seq_length

    with tf.name_scope(name):
        values = tf.placeholder(dtype, shape=[None, None], name='values')  # (batch_size, seq_length)
        mask = tf.placeholder(tf.float32, shape=[None, None], name='mask')  # (batch_size, seq_length)

    if self._seq_length is not None:
        # add static shape information
        batch_dim, _ = values.get_shape()
        new_shape = tf.TensorShape([batch_dim, tf.Dimension(seq_length)])
        values.set_shape(new_shape)
        mask.set_shape(new_shape)

    super(FeedSequenceBatch, self).__init__(values, mask)
Example #30
Source File: BPRMF.py From knowledge_graph_attention_network with MIT License | 5 votes |
def _statistics_params(self):
    # number of params
    total_parameters = 0
    for variable in self.weights.values():
        shape = variable.get_shape()  # shape is an array of tf.Dimension
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    if self.verbose > 0:
        print("#params: %d" % total_parameters)