Python tensorflow.python.framework.dtypes.resource() Examples
The following are 30 code examples of `tensorflow.python.framework.dtypes.resource()`.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module `tensorflow.python.framework.dtypes`, or try the search function.
Example #1
Source File: optimizer.py From lambda-packs with MIT License | 6 votes |
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    """Apply a sparse gradient to the variable behind `handle`.

    Unlike `_resource_apply_sparse`, the `indices` seen here may contain
    repeats. The default behaviour sums the gradient slices that share an
    index and then delegates to `_resource_apply_sparse`; subclasses that
    natively handle duplicates can override this to skip the summation.

    Args:
      grad: a `Tensor` of gradient values for the affected indices.
      handle: a `Tensor` of dtype `resource` pointing at the variable to
        be updated.
      indices: an integral `Tensor` of (possibly repeated) indices with
        nonzero gradient.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    deduped_grad, distinct_indices = _deduplicate_indexed_slices(
        values=grad, indices=indices)
    return self._resource_apply_sparse(deduped_grad, handle, distinct_indices)
Example #2
Source File: io_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def serialize_state(self, name=None):
    """Encode this reader's state as a string tensor.

    Not every Reader supports serialization; unsupported readers raise an
    Unimplemented error at run time.

    Args:
      name: Optional name for the created operation.

    Returns:
      A string `Tensor` holding the serialized reader state.
    """
    reader = self._reader_ref
    # Resource-handle readers use the V2 kernel; ref-typed readers the V1 one.
    serialize = (gen_io_ops._reader_serialize_state_v2
                 if reader.dtype == dtypes.resource
                 else gen_io_ops._reader_serialize_state)
    return serialize(reader, name=name)
Example #3
Source File: io_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def restore_state(self, state, name=None):
    """Reset this reader to a previously saved state.

    Not every Reader supports restoration; unsupported readers raise an
    Unimplemented error at run time.

    Args:
      state: A string `Tensor`, the result of a SerializeState call on a
        Reader of matching type.
      name: Optional name for the created operation.

    Returns:
      The created `Operation`.
    """
    reader = self._reader_ref
    if reader.dtype == dtypes.resource:
        return gen_io_ops._reader_restore_state_v2(reader, state, name=name)
    return gen_io_ops._reader_restore_state(reader, state, name=name)
Example #4
Source File: io_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def num_records_produced(self, name=None):
    """Return the number of records this reader has produced.

    Equals the number of Read executions that have succeeded.

    Args:
      name: Optional name for the created operation.

    Returns:
      An int64 `Tensor`.
    """
    reader = self._reader_ref
    if reader.dtype == dtypes.resource:
        op = gen_io_ops._reader_num_records_produced_v2
    else:
        op = gen_io_ops._reader_num_records_produced
    return op(reader, name=name)
Example #5
Source File: load.py From coremltools with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _graph_def_from_concrete_fn(cfs):
    """Freeze a single concrete function into an optimized GraphDef.

    Args:
      cfs: A list holding exactly one TF concrete function.

    Returns:
      A `GraphDef` with variables converted to constants and Grappler's
      constant-folding and dependency passes applied.

    Raises:
      NotImplementedError: If `cfs` does not contain exactly one function.
    """
    if len(cfs) != 1:
        raise NotImplementedError("Only a single concrete function is supported.")

    frozen = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False)
    gdef = frozen.graph.as_graph_def(add_shapes=True)

    # Run Grappler's constant-folding pass; resource-dtype inputs are
    # excluded from the fed inputs.
    data_inputs = [t for t in frozen.inputs if t.dtype != _dtypes.resource]
    return _run_graph_optimizations(
        gdef,
        data_inputs,
        frozen.outputs,
        config=_get_grappler_config(["constfold", "dependency"]),
        graph=frozen.graph,
    )
Example #6
Source File: gen_io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def _reader_reset_v2(reader_handle, name=None):
    r"""Restore a Reader to its initial clean state.

    Args:
      reader_handle: A `Tensor` of type `resource`; handle to a Reader.
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "ReaderResetV2", reader_handle=reader_handle, name=name)
        return op
    # Eager path: coerce the handle, then run the kernel directly.
    handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
    return _execute.execute(
        b"ReaderResetV2", 0, inputs=[handle], attrs=None, ctx=ctx, name=name)
Example #7
Source File: data_flow_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def is_closed(self, name=None):
    """Return a boolean tensor that is true iff this queue is closed.

    Args:
      name: Optional operation name; defaults to "<queue-name>_Is_Closed".

    Returns:
      True if the queue is closed and false if the queue is open.
    """
    if name is None:
        name = "%s_Is_Closed" % self._name
    queue = self._queue_ref
    if queue.dtype == _dtypes.resource:
        return gen_data_flow_ops.queue_is_closed_v2(queue, name=name)
    return gen_data_flow_ops.queue_is_closed_(queue, name=name)
Example #8
Source File: dataset_ops.py From lambda-packs with MIT License | 6 votes |
def __init__(self, iterator_resource, initializer, output_types, output_shapes):
    """Wrap an existing iterator resource.

    NOTE(mrry): Most users will not call this initializer directly and
    should instead use `Iterator.from_dataset()` or
    `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing
        the iterator.
      initializer: A `tf.Operation` that initializes this iterator.
      output_types: A nested structure of `tf.DType` objects, one per
        component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects, one
        per component of an element of this iterator.
    """
    self._output_shapes = output_shapes
    self._output_types = output_types
    self._initializer = initializer
    self._iterator_resource = iterator_resource
Example #9
Source File: optimizer.py From lambda-packs with MIT License | 6 votes |
def _resource_apply_sparse(self, grad, handle, indices):
    """Apply a de-duplicated sparse gradient to the variable `handle`.

    The `indices` passed here have already been made unique (see
    `_resource_apply_sparse_duplicate_indices`). Subclasses must
    implement this; optimizers that cope correctly with repeated indices
    may instead override the duplicate-indices variant to avoid the
    de-duplication overhead.

    Args:
      grad: a `Tensor` of gradient values for the affected indices.
      handle: a `Tensor` of dtype `resource` pointing at the variable to
        be updated.
      indices: an integral `Tensor` of unique nonzero-gradient indices.

    Returns:
      An `Operation` which updates the value of the variable.

    Raises:
      NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError()
Example #10
Source File: io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def serialize_state(self, name=None):
    """Produce a string tensor encoding this reader's state.

    Not all Readers support being serialized, so this can raise an
    Unimplemented error at run time.

    Args:
      name: Optional name for the created operation.

    Returns:
      A string `Tensor`.
    """
    reader = self._reader_ref
    if reader.dtype == dtypes.resource:
        return gen_io_ops._reader_serialize_state_v2(reader, name=name)
    return gen_io_ops._reader_serialize_state(reader, name=name)
Example #11
Source File: io_ops.py From lambda-packs with MIT License | 6 votes |
def restore_state(self, state, name=None):
    """Restore this reader from a previously serialized state.

    Not all Readers support being restored, so this can raise an
    Unimplemented error at run time.

    Args:
      state: A string `Tensor`; the result of a SerializeState call on a
        Reader of matching type.
      name: Optional name for the created operation.

    Returns:
      The created `Operation`.
    """
    reader = self._reader_ref
    # Select the kernel matching the reader's handle representation.
    restore = (gen_io_ops._reader_restore_state_v2
               if reader.dtype == dtypes.resource
               else gen_io_ops._reader_restore_state)
    return restore(reader, state, name=name)
Example #12
Source File: io_ops.py From lambda-packs with MIT License | 6 votes |
def serialize_state(self, name=None):
    """Serialize this reader's state into a string tensor.

    Not all Readers support serialization; unsupported readers raise an
    Unimplemented error at run time.

    Args:
      name: Optional name for the created operation.

    Returns:
      A string `Tensor`.
    """
    uses_resource_handle = self._reader_ref.dtype == dtypes.resource
    if uses_resource_handle:
        return gen_io_ops._reader_serialize_state_v2(self._reader_ref,
                                                     name=name)
    return gen_io_ops._reader_serialize_state(self._reader_ref, name=name)
Example #13
Source File: io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def num_records_produced(self, name=None):
    """Return how many records this reader has produced.

    This equals the number of Read executions that have succeeded.

    Args:
      name: Optional name for the created operation.

    Returns:
      An int64 `Tensor`.
    """
    reader = self._reader_ref
    if reader.dtype == dtypes.resource:
        return gen_io_ops._reader_num_records_produced_v2(reader, name=name)
    return gen_io_ops._reader_num_records_produced(reader, name=name)
Example #14
Source File: io_ops.py From lambda-packs with MIT License | 6 votes |
def num_records_produced(self, name=None):
    """Count the records this reader has produced so far.

    This is the same as the number of Read executions that have
    succeeded.

    Args:
      name: Optional name for the created operation.

    Returns:
      An int64 `Tensor`.
    """
    reader = self._reader_ref
    counter = (gen_io_ops._reader_num_records_produced_v2
               if reader.dtype == dtypes.resource
               else gen_io_ops._reader_num_records_produced)
    return counter(reader, name=name)
Example #15
Source File: gen_dataset_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def restore_iterator(iterator, path, name=None):
    r"""Restore the state of `iterator` from a checkpoint saved at `path`.

    The checkpoint must have been written with "SaveIterator".

    Args:
      iterator: A `Tensor` of type `resource`.
      path: A `Tensor` of type `string`.
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "RestoreIterator", iterator=iterator, path=path, name=name)
        return op
    # Eager path: coerce both inputs before dispatching the kernel.
    handle = _ops.convert_to_tensor(iterator, _dtypes.resource)
    ckpt_path = _ops.convert_to_tensor(path, _dtypes.string)
    return _execute.execute(
        b"RestoreIterator", 0, inputs=[handle, ckpt_path], attrs=None,
        ctx=ctx, name=name)
Example #16
Source File: optimizer.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    """Apply a sparse gradient (with possibly repeated indices) to `handle`.

    Optimizers that override this method must tolerate repeated indices.
    The default implementation enforces correct summing behaviour by
    first collapsing duplicate indices (summing their gradients) and then
    forwarding to `_resource_apply_sparse`. Optimizers that already cope
    with duplicates can override this to avoid the summation cost.

    Args:
      grad: a `Tensor` of gradient values for the affected indices.
      handle: a `Tensor` of dtype `resource` pointing at the variable to
        be updated.
      indices: an integral `Tensor` of indices with nonzero gradient;
        indices may be repeated.

    Returns:
      An `Operation` which updates the value of the variable.
    """
    grad_sum, uniq_idx = _deduplicate_indexed_slices(values=grad,
                                                     indices=indices)
    return self._resource_apply_sparse(grad_sum, handle, uniq_idx)
Example #17
Source File: optimizer.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def _resource_apply_sparse(self, grad, handle, indices):
    """Apply a sparse gradient with unique indices to the variable `handle`.

    Similar to `_apply_sparse`, the `indices` argument here has already
    been de-duplicated. Optimizers that deal correctly with non-unique
    indices may instead override
    `_resource_apply_sparse_duplicate_indices` to avoid that overhead.

    Args:
      grad: a `Tensor` of gradient values for the affected indices.
      handle: a `Tensor` of dtype `resource` pointing at the variable to
        be updated.
      indices: an integral `Tensor` of unique nonzero-gradient indices.

    Returns:
      An `Operation` which updates the value of the variable.

    Raises:
      NotImplementedError: subclasses must supply an implementation.
    """
    raise NotImplementedError()
Example #18
Source File: gen_lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _lookup_table_import_v2(table_handle, keys, values, name=None):
    r"""Replace the contents of the table with `keys` and `values`.

    The tensor `keys` must be of the same type as the keys of the table,
    and `values` of the type of the table values.

    Args:
      table_handle: A `Tensor` of type `resource`; handle to the table.
      keys: A `Tensor` of keys, any shape.
      values: A `Tensor` of values to associate with `keys`.
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "LookupTableImportV2", table_handle=table_handle, keys=keys,
            values=values, name=name)
        return op
    # Eager path: infer key/value dtypes, then run the kernel directly.
    tin, (keys,) = _execute.args_to_matching_eager([keys], ctx)
    tout, (values,) = _execute.args_to_matching_eager([values], ctx)
    handle = _ops.convert_to_tensor(table_handle, _dtypes.resource)
    attrs = ("Tin", tin.as_datatype_enum, "Tout", tout.as_datatype_enum)
    return _execute.execute(
        b"LookupTableImportV2", 0, inputs=[handle, keys, values],
        attrs=attrs, ctx=ctx, name=name)
Example #19
Source File: gen_io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _reader_num_records_produced_v2(reader_handle, name=None):
    r"""Return how many records this Reader has produced.

    This is the same as the number of ReaderRead executions that have
    succeeded.

    Args:
      reader_handle: A `Tensor` of type `resource`; handle to a Reader.
      name: Optional name for the operation.

    Returns:
      A `Tensor` of type `int64`.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "ReaderNumRecordsProducedV2", reader_handle=reader_handle,
            name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = None
    else:
        handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
        flat_inputs = [handle]
        attrs = None
        results = _execute.execute(
            b"ReaderNumRecordsProducedV2", 1, inputs=flat_inputs,
            attrs=attrs, ctx=ctx, name=name)
    _execute.record_gradient(
        "ReaderNumRecordsProducedV2", flat_inputs, attrs, results, name)
    result, = results
    return result
Example #20
Source File: gen_lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _lookup_table_export_v2(table_handle, Tkeys, Tvalues, name=None):
    r"""Output every key and value stored in the table.

    Args:
      table_handle: A `Tensor` of type `resource`; handle to the table.
      Tkeys: A `tf.DType` — dtype of the exported keys.
      Tvalues: A `tf.DType` — dtype of the exported values.
      name: Optional name for the operation.

    Returns:
      A named tuple of `Tensor` objects `(keys, values)`:
        keys: `Tensor` of type `Tkeys`; vector of all keys in the table.
        values: `Tensor` of type `Tvalues`; all values, indexed in
          parallel with `keys`.
    """
    Tkeys = _execute.make_type(Tkeys, "Tkeys")
    Tvalues = _execute.make_type(Tvalues, "Tvalues")
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "LookupTableExportV2", table_handle=table_handle, Tkeys=Tkeys,
            Tvalues=Tvalues, name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = ("Tkeys", op.get_attr("Tkeys"),
                 "Tvalues", op.get_attr("Tvalues"))
    else:
        handle = _ops.convert_to_tensor(table_handle, _dtypes.resource)
        flat_inputs = [handle]
        attrs = ("Tkeys", Tkeys, "Tvalues", Tvalues)
        results = _execute.execute(
            b"LookupTableExportV2", 2, inputs=flat_inputs, attrs=attrs,
            ctx=ctx, name=name)
    _execute.record_gradient(
        "LookupTableExportV2", flat_inputs, attrs, results, name)
    return _LookupTableExportV2Output._make(results)
Example #21
Source File: gen_lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _initialize_table_v2(table_handle, keys, values, name=None):
    r"""Initialize a table from separate `keys` and `values` tensors.

    Args:
      table_handle: A `Tensor` of type `resource`; handle to a table
        which will be initialized.
      keys: A `Tensor` of keys (type Tkey).
      values: A `Tensor` of values (type Tval).
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "InitializeTableV2", table_handle=table_handle, keys=keys,
            values=values, name=name)
        return op
    # Eager path: infer key/value dtypes before dispatching the kernel.
    tkey, (keys,) = _execute.args_to_matching_eager([keys], ctx)
    tval, (values,) = _execute.args_to_matching_eager([values], ctx)
    handle = _ops.convert_to_tensor(table_handle, _dtypes.resource)
    attrs = ("Tkey", tkey.as_datatype_enum, "Tval", tval.as_datatype_enum)
    return _execute.execute(
        b"InitializeTableV2", 0, inputs=[handle, keys, values],
        attrs=attrs, ctx=ctx, name=name)
Example #22
Source File: gen_lookup_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _lookup_table_size_v2(table_handle, name=None):
    r"""Compute the number of elements in the given table.

    Args:
      table_handle: A `Tensor` of type `resource`; handle to the table.
      name: Optional name for the operation.

    Returns:
      An `int64` scalar `Tensor` holding the element count.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "LookupTableSizeV2", table_handle=table_handle, name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = None
    else:
        handle = _ops.convert_to_tensor(table_handle, _dtypes.resource)
        flat_inputs = [handle]
        attrs = None
        results = _execute.execute(
            b"LookupTableSizeV2", 1, inputs=flat_inputs, attrs=attrs,
            ctx=ctx, name=name)
    _execute.record_gradient(
        "LookupTableSizeV2", flat_inputs, attrs, results, name)
    result, = results
    return result
Example #23
Source File: gen_resource_variable_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def variable_shape(input, out_type=_dtypes.int32, name=None):
    r"""Return the shape of the variable pointed to by `input`.

    Produces a 1-D integer tensor representing the shape of `input`,
    for example:

    ```
    # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
    shape(t) ==> [2, 2, 3]
    ```

    Args:
      input: A `Tensor` of type `resource`.
      out_type: An optional `tf.DType` from: `tf.int32, tf.int64`.
        Defaults to `tf.int32`.
      name: Optional name for the operation.

    Returns:
      A `Tensor` of type `out_type`.
    """
    # `None` is treated the same as the default output dtype.
    out_type = _execute.make_type(
        _dtypes.int32 if out_type is None else out_type, "out_type")
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "VariableShape", input=input, out_type=out_type, name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = ("out_type", op.get_attr("out_type"))
    else:
        handle = _ops.convert_to_tensor(input, _dtypes.resource)
        flat_inputs = [handle]
        attrs = ("out_type", out_type)
        results = _execute.execute(
            b"VariableShape", 1, inputs=flat_inputs, attrs=attrs, ctx=ctx,
            name=name)
    _execute.record_gradient(
        "VariableShape", flat_inputs, attrs, results, name)
    result, = results
    return result
Example #24
Source File: gen_io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _reader_serialize_state_v2(reader_handle, name=None):
    r"""Produce a string tensor that encodes the state of a Reader.

    Not all Readers support being serialized, so this can produce an
    Unimplemented error.

    Args:
      reader_handle: A `Tensor` of type `resource`; handle to a Reader.
      name: Optional name for the operation.

    Returns:
      A `Tensor` of type `string`.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "ReaderSerializeStateV2", reader_handle=reader_handle, name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = None
    else:
        handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
        flat_inputs = [handle]
        attrs = None
        results = _execute.execute(
            b"ReaderSerializeStateV2", 1, inputs=flat_inputs, attrs=attrs,
            ctx=ctx, name=name)
    _execute.record_gradient(
        "ReaderSerializeStateV2", flat_inputs, attrs, results, name)
    result, = results
    return result
Example #25
Source File: gen_io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _reader_restore_state_v2(reader_handle, state, name=None):
    r"""Restore a reader to a previously saved state.

    Not all Readers support being restored, so this can produce an
    Unimplemented error.

    Args:
      reader_handle: A `Tensor` of type `resource`; handle to a Reader.
      state: A `Tensor` of type `string`; result of a
        ReaderSerializeState from a Reader with type matching
        `reader_handle`.
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "ReaderRestoreStateV2", reader_handle=reader_handle,
            state=state, name=name)
        return op
    # Eager path: coerce both inputs before dispatching the kernel.
    handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
    saved_state = _ops.convert_to_tensor(state, _dtypes.string)
    return _execute.execute(
        b"ReaderRestoreStateV2", 0, inputs=[handle, saved_state],
        attrs=None, ctx=ctx, name=name)
Example #26
Source File: gen_io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _reader_num_work_units_completed_v2(reader_handle, name=None):
    r"""Return how many work units this Reader has finished processing.

    Args:
      reader_handle: A `Tensor` of type `resource`; handle to a Reader.
      name: Optional name for the operation.

    Returns:
      A `Tensor` of type `int64`.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "ReaderNumWorkUnitsCompletedV2", reader_handle=reader_handle,
            name=name)
        results = op.outputs[:]
        flat_inputs = op.inputs
        attrs = None
    else:
        handle = _ops.convert_to_tensor(reader_handle, _dtypes.resource)
        flat_inputs = [handle]
        attrs = None
        results = _execute.execute(
            b"ReaderNumWorkUnitsCompletedV2", 1, inputs=flat_inputs,
            attrs=attrs, ctx=ctx, name=name)
    _execute.record_gradient(
        "ReaderNumWorkUnitsCompletedV2", flat_inputs, attrs, results, name)
    result, = results
    return result
Example #27
Source File: gen_resource_variable_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def assign_sub_variable_op(resource, value, name=None):
    r"""Subtract `value` from the current value of a variable.

    Any ReadVariableOp which depends directly or indirectly on this
    assign is guaranteed to see the updated value or a subsequent newer
    one.

    Args:
      resource: A `Tensor` of type `resource`; handle to the storage in
        which the variable lives.
      value: A `Tensor`; the amount by which the variable is adjusted.
      name: Optional name for the operation.

    Returns:
      The created `Operation` (graph mode) or the eager execution result.
    """
    ctx = _context.context()
    if ctx.in_graph_mode():
        _, _, op = _op_def_lib._apply_op_helper(
            "AssignSubVariableOp", resource=resource, value=value, name=name)
        return op
    # Eager path: infer the value dtype, then run the kernel directly.
    dtype, (value,) = _execute.args_to_matching_eager([value], ctx)
    handle = _ops.convert_to_tensor(resource, _dtypes.resource)
    return _execute.execute(
        b"AssignSubVariableOp", 0, inputs=[handle, value],
        attrs=("dtype", dtype.as_datatype_enum), ctx=ctx, name=name)
Example #28
Source File: io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def reset(self, name=None):
    """Restore this reader to its initial clean state.

    Args:
      name: Optional name for the created operation.

    Returns:
      The created `Operation`.
    """
    reader = self._reader_ref
    if reader.dtype == dtypes.resource:
        return gen_io_ops._reader_reset_v2(reader, name=name)
    return gen_io_ops._reader_reset(reader, name=name)
Example #29
Source File: io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def num_work_units_completed(self, name=None):
    """Return how many work units this reader has finished processing.

    Args:
      name: Optional name for the created operation.

    Returns:
      An int64 `Tensor`.
    """
    reader = self._reader_ref
    # Resource-handle readers use the V2 kernel; ref-typed readers the V1 one.
    counter = (gen_io_ops._reader_num_work_units_completed_v2
               if reader.dtype == dtypes.resource
               else gen_io_ops._reader_num_work_units_completed)
    return counter(reader, name=name)
Example #30
Source File: io_ops.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def read_up_to(self, queue, num_records,  # pylint: disable=invalid-name
               name=None):
    """Return up to `num_records` (key, value) pairs produced by this reader.

    Dequeues a work unit from `queue` when necessary (e.g. when the
    reader has finished its previous file and needs to start a new one).
    May return fewer than `num_records` pairs even before the last batch.

    Args:
      queue: A Queue, or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      num_records: Number of records to read.
      name: Optional name for the created operation.

    Returns:
      A tuple of Tensors (keys, values):
        keys: A 1-D string Tensor.
        values: A 1-D string Tensor.
    """
    queue_ref = queue if isinstance(queue, ops.Tensor) else queue.queue_ref
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops._reader_read_up_to_v2(
            self._reader_ref, queue_ref, num_records, name=name)
    # For compatibility with pre-resource queues, create a ref(string)
    # tensor which can be looked up as the same queue by a resource
    # manager.
    legacy_queue = gen_data_flow_ops._fake_queue(queue_ref)
    return gen_io_ops._reader_read_up_to(
        self._reader_ref, legacy_queue, num_records, name=name)