Python tensorflow.Operation() Examples
The following are 28 code examples of tensorflow.Operation().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: analysis_graph_builder.py From transform with Apache License 2.0
def _serialize_op_attr(op_attr):
  """Deterministically serializes tf.Operation attrs since it is a map."""
  sorted_attributes = sorted(op_attr.items(), key=lambda kv: kv[0])
  if 'f' in op_attr:
    # This is a tf.Function node, and it includes attributes that are
    # inconsistent across runs such as _gradient_op_type, config_proto, so we
    # only keep input and output types since other information will arrive from
    # the FuncGraph attributes.
    sorted_attributes = [
        kv for kv in sorted_attributes if kv[0] in ('Tin', 'Tout')
    ]
  result = []
  for key, attr_value in sorted_attributes:
    result.append(key)
    attr_value = copy.deepcopy(attr_value)
    if attr_value.list.func:
      raise ValueError(
          'Unable to serialize op attributes that contain a `list.func` field')
    if attr_value.HasField('func'):
      # There should be a separate call for the FuncGraph attributes.
      attr_value.ClearField('func')
    result.append(attr_value.SerializeToString())
  return result
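As a usage sketch (not part of the transform source): the attrs map of a tf.Operation can be pulled from its NodeDef and fed to the helper above. This assumes TF 1.x graph mode and that the helper and `import copy` are in scope.

import tensorflow as tf

op = tf.constant(1.0, name='c').op
# node_def.attr is a map from attribute name to AttrValue protos.
attrs = {key: op.node_def.attr[key] for key in op.node_def.attr}
serialized = _serialize_op_attr(attrs)  # e.g. ['dtype', b'...', 'value', b'...']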
Example #2
Source File: utils.py From spark-deep-learning with Apache License 2.0
def get_op(tfobj_or_name, graph):
    """
    Get a :py:class:`tf.Operation` object.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the operation.
    """
    graph = validated_graph(graph)
    _assert_same_graph(tfobj_or_name, graph)
    if isinstance(tfobj_or_name, tf.Operation):
        return tfobj_or_name
    name = tfobj_or_name
    if isinstance(tfobj_or_name, tf.Tensor):
        name = tfobj_or_name.name
    if not isinstance(name, six.string_types):
        raise TypeError('invalid op request for [type {}] {}'.format(type(name), name))
    _op_name = op_name(name, graph=None)
    op = graph.get_operation_by_name(_op_name)  # pylint: disable=invalid-name
    err_msg = 'cannot locate op {} in the current graph, got [type {}] {}'
    assert isinstance(op, tf.Operation), err_msg.format(_op_name, type(op), op)
    return op
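For context, a minimal sketch of the op/tensor name distinction this helper relies on, assuming TF 1.x graph mode (the graph and names below are hypothetical):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
    y = tf.identity(x, name='y')

# 'y:0' names the tensor; 'y' names the operation that produces it.
op = graph.get_operation_by_name('y')
assert op is y.op
assert graph.get_tensor_by_name('y:0') is y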
Example #3
Source File: utils.py From spark-deep-learning with Apache License 2.0
def get_tensor(tfobj_or_name, graph):
    """
    Get a :py:class:`tf.Tensor` object.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the tensor.
    """
    graph = validated_graph(graph)
    _assert_same_graph(tfobj_or_name, graph)
    if isinstance(tfobj_or_name, tf.Tensor):
        return tfobj_or_name
    name = tfobj_or_name
    if isinstance(tfobj_or_name, tf.Operation):
        name = tfobj_or_name.name
    if not isinstance(name, six.string_types):
        raise TypeError('invalid tensor request for {} of {}'.format(name, type(name)))
    _tensor_name = tensor_name(name, graph=None)
    tnsr = graph.get_tensor_by_name(_tensor_name)
    err_msg = 'cannot locate tensor {} in the current graph, got [type {}] {}'
    assert isinstance(tnsr, tf.Tensor), err_msg.format(_tensor_name, type(tnsr), tnsr)
    return tnsr
Example #4
Source File: model_handler.py From nucleus7 with Mozilla Public License 2.0
def get_train_op(self, model_results: ModelResults) -> tf.Operation:
    """
    Create the train operation using the optimization handler and group it
    with the update operation.

    Parameters
    ----------
    model_results
        model results

    Returns
    -------
    train_with_update_op
        train operation grouped together with the update operation
    """
    with tf.variable_scope(ScopeNames.TRAIN_OP):
        train_op = self.optimization_handler.get_train_op(
            model_results.grads_and_vars,
            model_results.regularization_grads_and_vars,
            trainable_variables=self.model.trainable_variables)
        update_op = self._get_update_op()
        train_with_update_op = tf.group(
            train_op, update_op, name='train_op')
    return train_with_update_op
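A minimal, hypothetical sketch of the same grouping pattern — combining a train op with pending update ops (e.g. batch-norm statistics) into a single tf.Operation — assuming TF 1.x:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

# tf.group returns a single tf.Operation that runs all of its inputs.
train_with_update_op = tf.group(train_op, *update_ops, name='train_op')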
Example #5
Source File: tf_util.py From stable-baselines with MIT License
def __init__(self, inputs, outputs, updates, givens):
    """
    Theano-like function

    :param inputs: (TensorFlow Tensor or Object with make_feed_dict) list of input arguments
    :param outputs: (TensorFlow Tensor) list of outputs or a single output to be returned
        from the function. The returned value will also have the same shape.
    :param updates: ([tf.Operation] or tf.Operation) list of update functions or single
        update function that will be run whenever the function is called. Their return
        values are ignored.
    :param givens: (dict) the values known for the output
    """
    for inpt in inputs:
        if not hasattr(inpt, 'make_feed_dict') and \
                not (isinstance(inpt, tf.Tensor) and len(inpt.op.inputs) == 0):
            assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
    self.inputs = inputs
    updates = updates or []
    self.update_group = tf.group(*updates)
    self.outputs_update = list(outputs) + [self.update_group]
    self.givens = {} if givens is None else givens
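stable-baselines exposes this class through its tf_util.function helper; a short usage sketch in the spirit of the library's docstring (exact behavior assumed here, not verified):

import tensorflow as tf
from stable_baselines.common import tf_util

x = tf.placeholder(tf.int32, (), name='x')
y = tf.placeholder(tf.int32, (), name='y')
z = 3 * x + 2 * y
lin = tf_util.function(inputs=[x, y], outputs=z, givens={y: 0})

with tf.Session().as_default():
    assert lin(2) == 6        # y falls back to the given value 0
    assert lin(2, 2) == 10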
Example #6
Source File: tensorflow_translator.py From eran with Apache License 2.0
def matmul_resources(self, op):
    """
    Checks which one of the direct ancestor tf.Operations is a constant and returns
    the underlying tensor as a numpy.ndarray inside a tuple. The matrix is
    manipulated in a way that it can be used as the left multiplier in the
    matrix multiplication.

    Arguments
    ---------
    op : tf.Operation
        must have type "MatMul"

    Return
    ------
    output : tuple
        tuple with the matrix (of type numpy.ndarray) as its only item
    """
    inputs = op.inputs
    left = inputs[0]
    right = inputs[1]

    if left.op.type == "Const":
        matrix = self.sess.run(left) if not op.get_attr("transpose_a") else self.sess.run(left).transpose()
    else:
        matrix = self.sess.run(right).transpose() if not op.get_attr("transpose_b") else self.sess.run(right)
    return (matrix,)
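For reference, a small sketch of the op-attribute inspection used above (TF 1.x assumed; the values in the comments follow from the constants chosen here):

import tensorflow as tf

a = tf.constant([[1.0, 2.0]])
b = tf.placeholder(tf.float32, shape=[1, 2])
y = tf.matmul(a, b, transpose_b=True)

op = y.op
print(op.type)                     # 'MatMul'
print(op.get_attr('transpose_b'))  # True
print(op.inputs[0].op.type)        # 'Const' -- the branch the helper would run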
Example #7
Source File: tensorflow_translator.py From eran with Apache License 2.0
def add_resources(self, op):
    """
    Checks which one of the direct ancestor tf.Operations is a constant and returns
    the underlying tensor as a numpy.ndarray inside a tuple.

    Arguments
    ---------
    op : tf.Operation
        must have type "Add"

    Return
    ------
    output : tuple
        tuple with the addend (of type numpy.ndarray) as its only item
    """
    inputs = op.inputs
    left = inputs[0]
    right = inputs[1]

    if left.op.type == "Const":
        addend = self.sess.run(left)
    else:
        addend = self.sess.run(right)
    return (addend,)
Example #8
Source File: tensorflow_translator.py From eran with Apache License 2.0
def conv2d_resources(self, op):
    """
    Extracts the filter, the stride of the filter, and the padding from op as well
    as the shape of the input coming into op.

    Arguments
    ---------
    op : tf.Operation
        must have type "Conv2D"

    Return
    ------
    output : tuple
        has 5 entries (numpy.ndarray, list, list, int, int)
    """
    inputs = op.inputs
    image = inputs[0]
    filters = op.inputs[1]

    filters = self.sess.run(filters)
    image_shape = tensorshape_to_intlist(image.shape)[1:]
    strides = op.get_attr('strides')[1:3]
    padding_str = op.get_attr('padding').decode('utf-8')
    pad_top, pad_left = calculate_padding(padding_str, image_shape, filters.shape, strides)
    return filters, image_shape, strides, pad_top, pad_left
Example #9
Source File: tensorflow_translator.py From eran with Apache License 2.0
def pool_resources(self, op):
    """
    Extracts the incoming image size (height, width, channels), the size of the
    maxpool/averagepool window (height, width), and the strides of the window
    (height, width).

    Arguments
    ---------
    op : tf.Operation
        must have type "MaxPool" or "AvgPool"

    Return
    ------
    output : tuple
        has 5 entries (list, list, list, int, int)
    """
    image = op.inputs[0]

    image_shape = tensorshape_to_intlist(image.shape)[1:]
    window_size = op.get_attr('ksize')[1:3]
    strides = op.get_attr('strides')[1:3]
    padding_str = op.get_attr('padding').decode('utf-8')
    pad_top, pad_left = calculate_padding(padding_str, image_shape, window_size, strides)
    return image_shape, window_size, strides, pad_top, pad_left
Example #10
Source File: base.py From batchflow with Apache License 2.0
def _to_names(self, graph_item):
    # Base cases
    if isinstance(graph_item, tf.Tensor):
        return ('Tensor', graph_item.name)
    if isinstance(graph_item, tf.Operation):
        return ('Operation', graph_item.name)
    if isinstance(graph_item, tf.Variable):
        return ('Variable', graph_item.op.name)
    if isinstance(graph_item, (bool, str, int, float)) or graph_item is None:
        return graph_item

    # Handle different containers
    if isinstance(graph_item, (list, tuple, np.ndarray)):
        return type(graph_item)([self._to_names(item) for item in graph_item])
    if isinstance(graph_item, (dict, Config)):
        return type(graph_item)({key: self._to_names(graph_item[key])
                                 for key in graph_item.keys()})
    raise ValueError('Unrecognized type of value.')
Example #11
Source File: base.py From batchflow with Apache License 2.0
def _to_graph_items(self, name):
    # Base cases
    if isinstance(name, (bool, str, int, float)) or name is None:
        return name

    # Handle different containers
    if isinstance(name, (list, tuple, np.ndarray)):
        if len(name) == 2:
            type_, name_ = name
            if type_ == 'Variable':
                with self.graph.as_default():
                    return tf.global_variables(name_)[0]
            if type_ == 'Tensor':
                return self.graph.get_tensor_by_name(name_)
            if type_ == 'Operation':
                return self.graph.get_operation_by_name(name_)
        return type(name)([self._to_graph_items(item) for item in name])

    if isinstance(name, (dict, Config)):
        return type(name)({key: self._to_graph_items(name[key])
                           for key in name.keys()})
    raise ValueError('Unrecognized type of value.')
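A minimal sketch of the round trip these two helpers implement, using plain TF calls (hypothetical names, TF 1.x assumed):

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    t = tf.constant([1.0, 2.0], name='t')

# Serialize to a ('Tensor', name) pair and restore it from the graph.
kind, name = 'Tensor', t.name              # ('Tensor', 't:0')
restored = graph.get_tensor_by_name(name)
assert restored is t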
Example #12
Source File: optimizers.py From AmpliGraph with Apache License 2.0
def minimize(self, loss):
    """Create an optimizer to minimize the model loss.

    Parameters
    ----------
    loss : tf.Tensor
        Node which needs to be evaluated for computing the model loss.

    Returns
    -------
    train : tf.Operation
        Node that needs to be evaluated for minimizing the loss during training.
    """
    self.optimizer = tf.train.AdagradOptimizer(learning_rate=self._optimizer_params['lr'])
    train = self.optimizer.minimize(loss)
    return train
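A short, self-contained sketch of how such a minimize() result is typically driven, using a hypothetical toy loss (TF 1.x assumed):

import tensorflow as tf

w = tf.Variable(3.0)
loss = tf.square(w - 1.0)

optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train = optimizer.minimize(loss)  # a tf.Operation

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(train)
    print(sess.run(w))  # moves toward 1.0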
Example #13
Source File: tensor_forest_test.py From deep_image_model with Apache License 2.0
def testTrainingConstructionClassificationSparse(self):
  input_data = tf.SparseTensor(
      indices=[[0, 0], [0, 3],
               [1, 0], [1, 7],
               [2, 1],
               [3, 9]],
      values=[-1.0, 0.0, -1., 2., 1., -2.0],
      shape=[4, 10])
  input_labels = [0, 1, 2, 3]

  params = tensor_forest.ForestHParams(
      num_classes=4, num_features=10, num_trees=10, max_nodes=1000,
      split_after_samples=25).fill()

  graph_builder = tensor_forest.RandomForestGraphs(params)
  graph = graph_builder.training_graph(input_data, input_labels)
  self.assertTrue(isinstance(graph, tf.Operation))
Example #14
Source File: summaries.py From deep_image_model with Apache License 2.0
def tf_num_params(x):
  """Number of parameters in a TensorFlow subgraph.

  Args:
      x: root of the subgraph (Tensor, Operation)

  Returns:
      Total number of elements found in all Variables in the subgraph.
  """
  if isinstance(x, tf.Tensor):
    shape = x.get_shape()
    x = x.op
  if x.type == "Variable":
    return shape.num_elements()
  totals = [tf_num_params(y) for y in x.inputs]
  return sum(totals)
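A hedged usage sketch for the counter above. Note this code targets older TF where ref variables have op type "Variable"; on newer 1.x versions the op type is "VariableV2" (and "VarHandleOp" for resource variables), so the walk would fall through and report 0 there.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
w = tf.Variable(tf.zeros([4, 2]))   # 4 * 2 = 8 elements
y = tf.matmul(x, w)

# Recursing through y.op.inputs eventually reaches the op behind w, so the
# subgraph rooted at y should report 8 parameters under the old op type.
print(tf_num_params(y))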
Example #15
Source File: summaries.py From deep_image_model with Apache License 2.0
def tf_left_split(op):
  """Split the parameters of op for left recursion.

  Args:
    op: tf.Operation

  Returns:
    A tuple of the leftmost input tensor and a list of the remaining arguments.
  """
  if len(op.inputs) < 1:
    return None, []
  if op.type == "Concat":
    return op.inputs[1], op.inputs[2:]
  return op.inputs[0], op.inputs[1:]
Example #16
Source File: summaries.py From deep_image_model with Apache License 2.0
def tf_parameter_iter(x):
  """Iterate over the left branches of a graph and yield sizes.

  Args:
    x: root of the subgraph (Tensor, Operation)

  Yields:
    A triple of name, number of params, and shape.
  """
  while 1:
    if isinstance(x, tf.Tensor):
      shape = x.get_shape().as_list()
      x = x.op
    else:
      shape = ""
    left, right = tf_left_split(x)
    totals = [tf_num_params(y) for y in right]
    total = sum(totals)
    yield x.name, total, shape
    if left is None:
      break
    x = left
Example #17
Source File: common.py From tf-encrypted with Apache License 2.0
def evaluate(self, sess, x, y, data_owner):
    """Return the accuracy"""

    def print_accuracy(y_hat, y) -> tf.Operation:
        with tf.name_scope("print-accuracy"):
            correct_prediction = tf.equal(tf.round(y_hat), y)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            print_op = tf.print(
                "Accuracy on {}:".format(data_owner.player_name),
                accuracy)
            return print_op

    with tf.name_scope("evaluate"):
        y_hat = self.forward(x)
        print_accuracy_op = tfe.define_output(
            data_owner.player_name, [y_hat, y], print_accuracy)

    sess.run(print_accuracy_op, tag="evaluate")
Example #18
Source File: delayed_update_trainer.py From neuralmonkey with BSD 3-Clause "New" or "Revised" License
def accumulate_ops(self) -> List[tf.Operation]:
    # pylint: disable=unpacking-non-sequence
    existing_gradients, _ = self.existing_grads_and_vars
    # pylint: enable=unpacking-non-sequence

    # pylint: disable=not-an-iterable
    # Pylint does not understand @tensor annotations
    accumulate_ops = [
        tf.assign_add(gradbuf, grad)
        for gradbuf, grad in zip(
            self.gradient_buffers, existing_gradients)]

    accumulate_ops.extend(
        tf.assign_add(objbuf, obj.loss)
        for objbuf, obj in zip(self.objective_buffers, self.objectives))
    # pylint: enable=not-an-iterable

    accumulate_ops.append(
        tf.assign_add(self.diff_buffer, self.differentiable_loss_sum))
    accumulate_ops.append(
        tf.assign_add(self.cumulator_counter, 1))
    return accumulate_ops
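A stripped-down, hypothetical sketch of the gradient-accumulation idea this trainer relies on — buffer gradients across several runs, then apply them in one step (TF 1.x assumed):

import tensorflow as tf

w = tf.Variable(0.0)
loss = tf.square(w - 2.0)
(grad, var), = tf.train.GradientDescentOptimizer(0.1).compute_gradients(loss, [w])

grad_buffer = tf.Variable(tf.zeros_like(w), trainable=False)
accumulate_op = tf.assign_add(grad_buffer, grad)      # run once per micro-batch
reset_op = tf.assign(grad_buffer, tf.zeros_like(w))   # run after applying
apply_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(
    [(grad_buffer / 4.0, var)])                       # e.g. average over 4 steps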
Example #19
Source File: optimizers.py From AmpliGraph with Apache License 2.0
def minimize(self, loss):
    """Create an optimizer to minimize the model loss.

    Parameters
    ----------
    loss : tf.Tensor
        Node which needs to be evaluated for computing the model loss.

    Returns
    -------
    train : tf.Operation
        Node that needs to be evaluated for minimizing the loss during training.
    """
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self._optimizer_params['lr'])
    train = self.optimizer.minimize(loss)
    return train
Example #20
Source File: optimizers.py From AmpliGraph with Apache License 2.0
def minimize(self, loss):
    """Create an optimizer to minimize the model loss.

    Parameters
    ----------
    loss : tf.Tensor
        Node which needs to be evaluated for computing the model loss.

    Returns
    -------
    train : tf.Operation
        Node that needs to be evaluated for minimizing the loss during training.
    """
    self.optimizer = tf.train.MomentumOptimizer(learning_rate=self._optimizer_params['lr'],
                                                momentum=self._optimizer_params['momentum'])
    train = self.optimizer.minimize(loss)
    return train
Example #21
Source File: tolstoi_char_rnn.py From DeepOBS with MIT License
def _get_state_update_op(self, state_variables, new_states):
    """Add an operation to update the train states with the last state tensors.

    Args:
        state_variables (tf.Variable): State variables to be updated.
        new_states (tf.Variable): New state of the state variable.

    Returns:
        tf.Operation: Returns a tuple in order to combine all update_ops into a
        single operation. The tuple's actual value should not be used.
    """
    # Add an operation to update the train states with the last state tensors
    update_ops = []
    for state_variable, new_state in zip(state_variables, new_states):
        # Assign the new state to the state variables on this layer
        update_ops.extend([
            state_variable[0].assign(new_state[0]),
            state_variable[1].assign(new_state[1])
        ])
    # Return a tuple in order to combine all update_ops into a single operation.
    # The tuple's actual value should not be used.
    return tf.tuple(update_ops)
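A minimal sketch of the tf.tuple coupling used above, with a hypothetical two-component state such as an LSTM's (c, h) pair (TF 1.x assumed):

import tensorflow as tf

state = (tf.Variable(tf.zeros([2])), tf.Variable(tf.zeros([2])))
new_c = tf.ones([2])
new_h = tf.ones([2]) * 2.0

update_ops = [state[0].assign(new_c), state[1].assign(new_h)]
# tf.tuple couples the assigns so both have run before any returned value is used.
update = tf.tuple(update_ops)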
Example #22
Source File: model.py From ffn with Apache License 2.0
def __init__(self, deltas, batch_size=None, define_global_step=True):
  assert self.dim is not None

  self.deltas = deltas
  self.batch_size = batch_size

  # Initialize the shift collection. This is used during training with the
  # fixed step size policy.
  self.shifts = []
  for dx in (-self.deltas[0], 0, self.deltas[0]):
    for dy in (-self.deltas[1], 0, self.deltas[1]):
      for dz in (-self.deltas[2], 0, self.deltas[2]):
        if dx == 0 and dy == 0 and dz == 0:
          continue
        self.shifts.append((dx, dy, dz))

  if define_global_step:
    self.global_step = tf.Variable(0, name='global_step', trainable=False)

  # The seed is always a placeholder which is fed externally from the
  # training/inference drivers.
  self.input_seed = tf.placeholder(tf.float32, name='seed')
  self.input_patches = tf.placeholder(tf.float32, name='patches')

  # For training, labels should be defined as a TF object.
  self.labels = None

  # Optional. Provides per-pixel weights with which the loss is multiplied.
  # If specified, should have the same shape as self.labels.
  self.loss_weights = None

  self.logits = None  # type: tf.Operation

  # List of image tensors to save in summaries. The images are concatenated
  # along the X axis.
  self._images = []
Example #23
Source File: utils.py From spark-deep-learning with Apache License 2.0
def tensor_name(tfobj_or_name, graph=None):
    """
    Derive the :py:class:`tf.Tensor` name from a :py:class:`tf.Operation` or
    :py:class:`tf.Tensor` object, or its name.

    If a name is provided and the graph is not, we will derive the tensor name based on
    TensorFlow's naming convention. If the input is a TensorFlow object, or the graph is
    given, we also check that the tensor exists in the associated graph.

    :param tfobj_or_name: either a :py:class:`tf.Tensor`, :py:class:`tf.Operation` or
                          a name to either.
    :param graph: a :py:class:`tf.Graph` object containing the tensor; optional, and
                  not required to be provided by default.
    """
    if graph is not None:
        return get_tensor(tfobj_or_name, graph).name
    if isinstance(tfobj_or_name, six.string_types):
        # If input is a string, assume it is a name and infer the corresponding tensor name.
        # WARNING: this depends on TensorFlow's tensor naming convention
        name = tfobj_or_name
        name_parts = name.split(":")
        assert len(name_parts) <= 2, name_parts
        if len(name_parts) < 2:
            name += ":0"
        return name
    elif hasattr(tfobj_or_name, 'graph'):
        return get_tensor(tfobj_or_name, tfobj_or_name.graph).name
    else:
        raise TypeError('invalid tf.Tensor name query type {}'.format(type(tfobj_or_name)))
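The naming convention the string branch relies on can be seen directly (TF 1.x; the constant name is hypothetical):

import tensorflow as tf

c = tf.constant(1.0, name='c')
print(c.name)     # 'c:0' -- a tensor name is '<op_name>:<output_index>'
print(c.op.name)  # 'c'
# So tensor_name('c') would return 'c:0', and tensor_name('c:0') returns it unchanged.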
Example #24
Source File: run.py From tf-encrypted with Apache License 2.0
def receive_output(average: tf.Tensor) -> tf.Operation:
    # simply print average
    return tf.print("Average:", average)
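A minimal sketch showing that tf.print builds an operation that produces output only when run (TF 1.13+ graph mode assumed):

import tensorflow as tf

average = tf.constant(0.5)
print_op = tf.print("Average:", average)  # a tf.Operation, not a Tensor

with tf.Session() as sess:
    sess.run(print_op)  # prints only when the op is actually run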
Example #25
Source File: __init__.py From tf-encrypted with Apache License 2.0
def global_variables_initializer() -> tf.Operation:
    return tf.global_variables_initializer()
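Usage sketch: the returned op must be run once before any variables are read (TF 1.x):

import tensorflow as tf

v = tf.Variable(42)
init_op = tf.global_variables_initializer()  # a tf.Operation

with tf.Session() as sess:
    sess.run(init_op)    # variables must be initialized before use
    print(sess.run(v))   # 42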
Example #26
Source File: pond.py From tf-encrypted with Apache License 2.0
def __init__(self, prot, x_on_0, x_on_1, is_scaled, updater):
    assert isinstance(x_on_0, AbstractTensor), type(x_on_0)
    assert isinstance(x_on_1, AbstractTensor), type(x_on_1)
    assert isinstance(updater, tf.Operation), type(updater)

    super(PondCachedPublicTensor, self).__init__(
        prot,
        x_on_0,
        x_on_1,
        is_scaled,
    )
    self.updater = updater
Example #27
Source File: pond.py From tf-encrypted with Apache License 2.0
def __init__(self, prot, x0, x1, is_scaled, updater):
    assert isinstance(x0, AbstractTensor), type(x0)
    assert isinstance(x1, AbstractTensor), type(x1)
    assert isinstance(updater, tf.Operation), type(updater)

    super(PondCachedPrivateTensor, self).__init__(prot, x0, x1, is_scaled)
    self.updater = updater
Example #28
Source File: run.py From tf-encrypted with Apache License 2.0
def receive_output(self, logits: tf.Tensor) -> tf.Operation:
    with tf.name_scope("post-processing"):
        prediction = tf.argmax(logits, axis=1)
        op = tf.print("Result", prediction, summarize=self.BATCH_SIZE)
        return op