Python tensorflow.python.eager.context.in_graph_mode() Examples
The following are 29 code examples of tensorflow.python.eager.context.in_graph_mode(), collected from open-source projects. Each example notes the source file and project it was taken from. in_graph_mode() lived in the internal module tensorflow.python.eager.context and returned True while ops were being added to a graph, False under eager execution.
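in_graph_mode() was an internal helper and has been removed from recent TensorFlow releases; modern code asks the opposite question via context.executing_eagerly() (or the public tf.executing_eagerly()). A minimal compatibility sketch, assuming a TensorFlow build (roughly 1.7+) where executing_eagerly() exists:

# Sketch only: emulate the removed in_graph_mode() helper.
from tensorflow.python.eager import context

def in_graph_mode():
    # The old call answered "are ops being built into a graph?",
    # which is the negation of eager execution.
    return not context.executing_eagerly()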
Example #1
Source File: nn_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keeps its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits,
                             array_ops.concat([[-1], last_dim_size], 0))

  # Set output shape if known.
  if context.in_graph_mode():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      shape = shape.as_list()
      product = 1
      product_valid = True
      for d in shape[:-1]:
        if d is None:
          product_valid = False
          break
        else:
          product *= d
      if product_valid:
        output_shape = [product, shape[-1]]
        output.set_shape(output_shape)

  return output
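The reshape above collapses every leading dimension into one while keeping the last. A small illustrative sketch of the same flattening with public TF 1.x ops (shapes chosen arbitrarily):

# Illustration only: flatten [2, 3, 5] logits to [6, 5].
import tensorflow as tf

logits = tf.zeros([2, 3, 5])
flat = tf.reshape(logits, [-1, tf.shape(logits)[-1]])  # shape [6, 5]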
Example #2
Source File: function.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def add_to_graph(self, g):
  """Adds this function into the graph g."""
  self._create_definition_if_needed()

  # Adds this function into 'g'.
  # pylint: disable=protected-access
  if context.in_graph_mode():
    g._add_function(self)
  else:
    context.context().add_function_def(self.definition)
  # pylint: enable=protected-access

  # Ensures related sub-routines are defined in 'g', too.
  for f in self._sub_functions.values():
    f.add_to_graph(g)

  # Adds its gradient function, too.
  if self._grad_func:
    self._grad_func.add_to_graph(g)
Example #3
Source File: ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See @{tf.Graph.control_dependencies} for more details.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which must be
      executed or computed before running the operations defined in the
      context.  Can also be `None` to clear the control dependencies.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  if context.in_graph_mode():
    return get_default_graph().control_dependencies(control_inputs)
  else:
    return _NullContextmanager()
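In graph mode the wrapper simply defers to the default graph, so it behaves like the public tf.control_dependencies. A short usage sketch (TF 1.x graph mode assumed):

# Usage sketch: force an assign to run before a dependent read.
import tensorflow as tf

v = tf.Variable(0)
assign_op = tf.assign(v, 1)
with tf.control_dependencies([assign_op]):
    out = tf.identity(v)  # evaluating `out` runs `assign_op` first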
Example #4
Source File: adam.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable. Sort the var_list to make sure this device is consistent across
  # workers (these need to go on the same PS, otherwise some updates are
  # silently ignored).
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #5
Source File: lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, table_ref, default_value, initializer):
  """Construct a table object from a table reference.

  It requires a table initializer object (subclass of
  `TableInitializerBase`). It provides the table key and value types, as
  well as the op to initialize the table. The caller is responsible to
  execute the initialization op.

  Args:
    table_ref: The table reference, i.e. the output of the lookup table ops.
    default_value: The value to use if a key is missing in the table.
    initializer: The table initializer to use.
  """
  if context.in_graph_mode():
    name = table_ref.op.name.split("/")[-1]
  else:
    name = context.context().scope_name
  super(InitializableLookupTableBase, self).__init__(initializer.key_dtype,
                                                     initializer.value_dtype,
                                                     name)
  self._table_ref = table_ref
  self._default_value = ops.convert_to_tensor(
      default_value, dtype=self._value_dtype)
  self._default_value.get_shape().merge_with(tensor_shape.scalar())
  self._init = initializer.initialize(self)
Example #6
Source File: array_grad.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  if context.in_graph_mode():
    input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None]
Example #7
Source File: array_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def identity(input, name=None):  # pylint: disable=redefined-builtin
  r"""Return a tensor with the same shape and contents as input.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  if context.in_graph_mode():
    return gen_array_ops.identity(input, name=name)
  else:
    if context.context().device_name != input.device:
      return input._copy()  # pylint: disable=protected-access
    return input


# pylint: disable=redefined-builtin,protected-access
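The eager branch shows that identity may become a cross-device copy rather than a pure no-op. A usage sketch under eager execution (tf.enable_eager_execution is a TF 1.7+ call; eager is already the default in TF 2.x):

# Usage sketch: identity under eager execution.
import tensorflow as tf
tf.enable_eager_execution()  # TF 1.x only; omit on TF 2.x

x = tf.constant([1, 2, 3])
y = tf.identity(x)  # same contents; may be a device copy in eager mode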
Example #8
Source File: data_flow_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, dtype, shape, accumulator_ref):
  """Creates a new ConditionalAccumulator.

  Args:
    dtype: Datatype of the accumulated gradients.
    shape: Shape of the accumulated gradients.
    accumulator_ref: A handle to the conditional accumulator, created by
      subclasses.
  """
  self._dtype = dtype
  if shape is not None:
    self._shape = tensor_shape.TensorShape(shape)
  else:
    self._shape = tensor_shape.unknown_shape()
  self._accumulator_ref = accumulator_ref
  if context.in_graph_mode():
    self._name = self._accumulator_ref.op.name.split("/")[-1]
  else:
    self._name = context.context().scope_name
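This constructor backs the public tf.ConditionalAccumulator. A short usage sketch (TF 1.x graph mode):

# Usage sketch: apply one gradient, then take the aggregate.
import tensorflow as tf

acc = tf.ConditionalAccumulator(dtype=tf.float32, shape=())
apply_op = acc.apply_grad(tf.constant(1.0))
avg = acc.take_grad(num_required=1)

with tf.Session() as sess:
    sess.run(apply_op)
    print(sess.run(avg))  # 1.0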
Example #9
Source File: AMSGrad.py From AMSGrad-Tensorflow with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #10
Source File: AdaBound.py From AdaBound-Tensorflow with Apache License 2.0
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  graph = None if context.executing_eagerly() else ops.get_default_graph()
  create_new = self._get_non_slot_variable("beta1_power", graph) is None
  if not create_new and context.in_graph_mode():
    create_new = (self._get_non_slot_variable("beta1_power", graph).graph
                  is not first_var.graph)

  if create_new:
    self._create_non_slot_variable(initial_value=self._beta1,
                                   name="beta1_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._beta2,
                                   name="beta2_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._gamma,
                                   name="gamma_multi",
                                   colocate_with=first_var)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #11
Source File: AdaBound.py From captcha_trainer with Apache License 2.0
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  if StrictVersion(tf.__version__) >= StrictVersion('1.10.0'):
    graph = None if context.executing_eagerly() else ops.get_default_graph()
  else:
    graph = ops.get_default_graph()
  create_new = self._get_non_slot_variable("beta1_power", graph) is None
  if not create_new and context.in_graph_mode():
    create_new = (self._get_non_slot_variable("beta1_power", graph).graph
                  is not first_var.graph)

  if create_new:
    self._create_non_slot_variable(initial_value=self._beta1,
                                   name="beta1_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._beta2,
                                   name="beta2_power",
                                   colocate_with=first_var)
    self._create_non_slot_variable(initial_value=self._gamma,
                                   name="gamma_multi",
                                   colocate_with=first_var)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #12
Source File: AMSGrad.py From scGAN with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(
          self._beta1, name="beta1_power", trainable=False)
      self._beta2_power = variable_scope.variable(
          self._beta2, name="beta2_power", trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #13
Source File: AMSGrad.py From Custom-Optimizer-in-TensorFlow with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #14
Source File: opt.py From EMNLP2018_NLI with GNU General Public License v3.0
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #15
Source File: AMSGrad.py From DCRNN with MIT License
def _create_slots(self, var_list):
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._beta1_power is None
  if not create_new and context.in_graph_mode():
    create_new = (self._beta1_power.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
    self._zeros_slot(v, "vhat", self._name)
Example #16
Source File: nadam.py From BERT with Apache License 2.0
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable. Sort the var_list to make sure this device is consistent across
  # workers (these need to go on the same PS, otherwise some updates are
  # silently ignored).
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._iterations is None
  if not create_new and context.in_graph_mode():
    create_new = (self._iterations.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
      self._iterations = variable_scope.variable(0.,
                                                 name="iterations",
                                                 trainable=False)
      self._m_schedule = variable_scope.variable(1.,
                                                 name="m_schedule",
                                                 trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #17
Source File: check_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`.  Defaults to "assert_type".

  Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing.  Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      if context.in_graph_mode():
        raise TypeError(
            '%s %s must be of type %s' % (message, tensor.name, tf_type))
      else:
        raise TypeError(
            '%s tensor must be of type %s' % (message, tf_type))

    return control_flow_ops.no_op('statically_determined_correct_type')


# pylint: disable=line-too-long
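Because the check runs on static dtype information, the error surfaces at graph-construction time rather than at session run time. A usage sketch (TF 1.x):

# Usage sketch: the type assertion fires while the graph is built.
import tensorflow as tf

x = tf.constant([1.0, 2.0])
ok = tf.assert_type(x, tf.float32)  # passes; returns a no_op
# tf.assert_type(x, tf.int32)       # would raise TypeError immediately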
Example #18
Source File: data_flow_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def name(self):
  """The name of the underlying queue."""
  if context.in_graph_mode():
    return self._queue_ref.op.name
  return self._name
Example #19
Source File: nadam.py From tensorflow-DSMM with MIT License
def _create_slots(self, var_list):
  # Create the beta1 and beta2 accumulators on the same device as the first
  # variable. Sort the var_list to make sure this device is consistent across
  # workers (these need to go on the same PS, otherwise some updates are
  # silently ignored).
  first_var = min(var_list, key=lambda x: x.name)

  create_new = self._iterations is None
  if not create_new and context.in_graph_mode():
    create_new = (self._iterations.graph is not first_var.graph)

  if create_new:
    with ops.colocate_with(first_var):
      self._beta1_power = variable_scope.variable(self._beta1,
                                                  name="beta1_power",
                                                  trainable=False)
      self._beta2_power = variable_scope.variable(self._beta2,
                                                  name="beta2_power",
                                                  trainable=False)
      self._iterations = variable_scope.variable(0.,
                                                 name="iterations",
                                                 trainable=False)
      self._m_schedule = variable_scope.variable(1.,
                                                 name="m_schedule",
                                                 trainable=False)
  # Create slots for the first and second moments.
  for v in var_list:
    self._zeros_slot(v, "m", self._name)
    self._zeros_slot(v, "v", self._name)
Example #20
Source File: saver_test_utils.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, name, table_ref=None):
  if table_ref is None:
    self.table_ref = gen_lookup_ops._mutable_hash_table_v2(
        key_dtype=dtypes.string, value_dtype=dtypes.float32, name=name)
  else:
    self.table_ref = table_ref
  self._name = name
  if context.in_graph_mode():
    self._saveable = CheckpointedOp.CustomSaveable(self, name)
    ops_lib.add_to_collection(ops_lib.GraphKeys.SAVEABLE_OBJECTS,
                              self._saveable)
Example #21
Source File: saver_test_utils.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def saveable(self):
  if context.in_graph_mode():
    return self._saveable
  else:
    return CheckpointedOp.CustomSaveable(self, self.name)
Example #22
Source File: saver.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def restore(self, sess, save_path):
  """Restores previously saved variables.

  This method runs the ops added by the constructor for restoring variables.
  It requires a session in which the graph was launched.  The variables to
  restore do not have to have been initialized, as restoring is itself a way
  to initialize variables.

  The `save_path` argument is typically a value previously returned from a
  `save()` call, or a call to `latest_checkpoint()`.

  Args:
    sess: A `Session` to use to restore the parameters. None in eager mode.
    save_path: Path where parameters were previously saved.

  Raises:
    ValueError: If save_path is None.
  """
  if self._is_empty:
    return
  if save_path is None:
    raise ValueError("Can't load save_path when it is None.")
  logging.info("Restoring parameters from %s", save_path)
  if context.in_graph_mode():
    sess.run(self.saver_def.restore_op_name,
             {self.saver_def.filename_tensor_name: save_path})
  else:
    self._build_eager(save_path, build_save=False, build_restore=True)
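The graph-mode branch needs a live session, which is why the docstring insists on one. A usage sketch (TF 1.x; the checkpoint path is hypothetical):

# Usage sketch: restore variables from a checkpoint in graph mode.
import tensorflow as tf

v = tf.get_variable("v", shape=[2])
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt")  # hypothetical path
    print(sess.run(v))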
Example #23
Source File: slot_creator.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
                                 colocate_with_primary=True):
  """Creates a slot initialized using an `Initializer`.

  The type of the slot is determined by the given value.

  Args:
    primary: The primary `Variable` or `Tensor`.
    initializer: An `Initializer`.  The initial value of the slot.
    shape: Shape of the initial value of the slot.
    dtype: Type of the value of the slot.
    name: Name to use for the slot variable.
    colocate_with_primary: Boolean.  If True the slot is located
      on the same device as `primary`.

  Returns:
    A `Variable` object.
  """
  # Scope the slot name in the namespace of the primary variable.
  # Set "primary.op.name + '/' + name" as default name, so the scope name of
  # optimizer can be shared when reuse is True. Meanwhile when reuse is False
  # and the same name has been previously used, the scope name will add '_N'
  # as suffix for unique identifications.
  validate_shape = shape.is_fully_defined()
  prefix = primary.op.name if context.in_graph_mode() else primary._shared_name  # pylint: disable=protected-access
  with variable_scope.variable_scope(None, prefix + "/" + name):
    if colocate_with_primary:
      with ops.colocate_with(primary):
        return _create_slot_var(primary, initializer, "", validate_shape,
                                shape, dtype)
    else:
      return _create_slot_var(primary, initializer, "", validate_shape,
                              shape, dtype)
Example #24
Source File: resource_variable_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _init_from_proto(self, variable_def, import_scope=None):
  """Initializes from `VariableDef` proto."""
  # Note that init_from_proto is currently not supported in Eager mode.
  assert context.in_graph_mode()
  self._in_graph_mode = True
  assert isinstance(variable_def, variable_pb2.VariableDef)
  if not variable_def.is_resource:
    raise ValueError("Trying to restore Variable as ResourceVariable.")

  # Create from variable_def.
  g = ops.get_default_graph()
  self._handle = g.as_graph_element(
      ops.prepend_name_scope(
          variable_def.variable_name, import_scope=import_scope))
  self._graph_shape = tensor_shape.TensorShape(
      self._handle.op.get_attr("shape"))
  self._handle_device = self._handle.device
  self._handle_name = self._handle.name
  self._initializer_op = g.as_graph_element(
      ops.prepend_name_scope(
          variable_def.initializer_name, import_scope=import_scope))
  if variable_def.snapshot_name:
    self._cached_value = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.snapshot_name, import_scope=import_scope))
  else:
    self._cached_value = None
  if variable_def.HasField("save_slice_info_def"):
    self._save_slice_info = variables.Variable.SaveSliceInfo(
        save_slice_info_def=variable_def.save_slice_info_def)
  else:
    self._save_slice_info = None
  self._caching_device = None
  self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
  self._graph_element = self.value()
  self._constraint = None
  # LINT.ThenChange(//tensorflow/python/eager/graph_callable.py)
Example #25
Source File: base.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
  """Private utility to retrieve an attribute (e.g. inputs) from a node.

  This is used to implement the methods:
      - get_input_shape_at
      - get_output_shape_at
      - get_input_at
      etc...

  Arguments:
    node_index: Integer index of the node from which
        to retrieve the attribute.
    attr: Exact node attribute name.
    attr_name: Human-readable attribute name, for error messages.

  Returns:
    The layer's attribute `attr` at the node of index `node_index`.

  Raises:
    RuntimeError: If the layer has no inbound nodes, or if called in Eager
      mode.
    ValueError: If the index provided does not match any node.
  """
  assert context.in_graph_mode()
  if not self._inbound_nodes:
    raise RuntimeError('The layer has never been called '
                       'and thus has no defined ' + attr_name + '.')
  if not len(self._inbound_nodes) > node_index:
    raise ValueError('Asked to get ' + attr_name + ' at node ' +
                     str(node_index) + ', but the layer has only ' +
                     str(len(self._inbound_nodes)) + ' inbound nodes.')
  values = getattr(self._inbound_nodes[node_index], attr)
  if len(values) == 1:
    return values[0]
  else:
    return values
Example #26
Source File: core.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def call(self, inputs):
  outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
  if context.in_graph_mode():
    outputs.set_shape(self._compute_output_shape(inputs.get_shape()))
  return outputs
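This is the core of the Flatten layer: a dynamic reshape, plus static shape recovery when a graph is being built. A usage sketch with the public wrapper (TF 1.x):

# Usage sketch: flatten collapses all but the batch dimension.
import tensorflow as tf

x = tf.zeros([4, 2, 3])
flat = tf.layers.flatten(x)  # static shape [4, 6] in graph mode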
Example #27
Source File: graph_callable.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, resource, dtype, name, shape):
  self._handle = resource
  self._graph_shape = shape
  self._handle_device = resource.device
  self._handle_name = name
  self._cached_value = None
  self._initializer_op = None
  self._caching_device = None
  self._dtype = dtype
  self._constraint = None
  self._in_graph_mode = context.in_graph_mode()
  if self._in_graph_mode:
    self._graph_element = self.read_value()
Example #28
Source File: ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _copy(self, ctx=None, device_name=None):
  """Copies tensor to dest device."""
  # pylint: disable=protected-access
  # Creates a new tensor on the dest device.
  if ctx is None:
    ctx = context.context()
  if device_name is None:
    device_name = ctx.device_name
  try:
    new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
  except core._NotOkStatusException as e:
    six.raise_from(core._status_to_exception(e.code, e.message), None)
  if core.active_trace() is not None:
    core.active_trace().record_tensor("COPY",
                                      tensor_id(new_tensor),
                                      new_tensor.device,
                                      new_tensor.shape.num_elements())

  # Record the copy on tape and define backprop copy as well.
  if not context.in_graph_mode():
    self_device = self.device

    def grad_fun(dresult):
      return [dresult._copy(device_name=self_device)]

    tape.record_operation("_copy", [new_tensor], [self], [], grad_fun)
  return new_tensor
  # pylint: enable=protected-access
Example #29
Source File: ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def colocate_with(op, ignore_existing=False):
  if context.in_graph_mode():
    return get_default_graph().colocate_with(op, ignore_existing)
  else:
    if op is not None:
      return device(op.device)
    else:
      return _NullContextmanager()
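In graph mode this defers to Graph.colocate_with; under eager execution it degrades to a plain device scope (or a no-op when op is None). A usage sketch (TF 1.x graph mode; tf.colocate_with was deprecated in later releases):

# Usage sketch: ask the placer to put `b` on the same device as `a`.
import tensorflow as tf

a = tf.constant(1.0)
with tf.colocate_with(a):
    b = tf.constant(2.0)  # colocated with `a` at placement time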