Python tensorflow.TensorSpec() Examples

The following are 30 code examples of tensorflow.TensorSpec(), drawn from open-source projects. Each example lists the project and source file it comes from, so you can consult the original code for more context. You may also want to browse the other available functions and classes of the tensorflow module.
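Before working through the examples, the sketch below (editorial, not from any of the projects listed; variable names are illustrative) summarizes the tf.TensorSpec usages that recur throughout: constructing a spec directly, deriving one from a tensor with tf.TensorSpec.from_tensor, checking compatibility, and pinning the input signature of a tf.function.

import tensorflow as tf

# Construct a spec directly: a float32 matrix with an unknown batch dimension.
spec = tf.TensorSpec(shape=[None, 784], dtype=tf.float32, name="pixels")

# Derive a spec from an existing tensor.
x = tf.constant([[1.0, 2.0]])
x_spec = tf.TensorSpec.from_tensor(x)  # TensorSpec(shape=(1, 2), dtype=tf.float32)

# Check whether concrete tensors match the spec.
assert spec.is_compatible_with(tf.zeros([32, 784]))
assert not spec.is_compatible_with(tf.zeros([32, 10]))

# Pin the input signature of a tf.function so it only accepts matching tensors.
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def double(t):
  return 2.0 * t

print(double(tf.constant([1.0, 2.0])))  # tf.Tensor([2. 4.], shape=(2,), dtype=float32)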
Example #1
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def test_full_commutativity_with_sum(self):
    """Tests that fully commutes with sum property works."""
    spec = tf.TensorSpec((2,), tf.float32)

    encoder = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(test_utils.TimesTwoEncodingStage()).make(),
        spec)
    self.assertTrue(encoder.fully_commutes_with_sum)

    encoder = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.TimesTwoEncodingStage()).add_parent(
                test_utils.TimesTwoEncodingStage(), T2_VALS).make(), spec)
    self.assertTrue(encoder.fully_commutes_with_sum)

    encoder = core_encoder.EncoderComposer(
        test_utils.SignIntFloatEncodingStage())
    encoder.add_child(test_utils.TimesTwoEncodingStage(), SIF_SIGNS)
    encoder.add_child(test_utils.PlusOneEncodingStage(), SIF_INTS)
    encoder.add_child(test_utils.TimesTwoEncodingStage(), SIF_FLOATS).add_child(
        test_utils.PlusOneOverNEncodingStage(), T2_VALS)
    encoder = gather_encoder.GatherEncoder.from_encoder(encoder.make(), spec)
    self.assertFalse(encoder.fully_commutes_with_sum) 
Example #2
Source File: simple_encoder_test.py    From model-optimization with Apache License 2.0
def test_none_state_equal_to_initial_state(self):
    """Tests that not providing state is the same as initial_state."""
    x = tf.constant(1.0)
    encoder = simple_encoder.SimpleEncoder(
        core_encoder.EncoderComposer(
            test_utils.PlusOneOverNEncodingStage()).make(),
        tf.TensorSpec.from_tensor(x))

    state = encoder.initial_state()
    stateful_iteration = _make_iteration_function(encoder)

    @tf.function
    def stateless_iteration(x):
      encoded_x, _ = encoder.encode(x)
      decoded_x = encoder.decode(encoded_x)
      return encoded_x, decoded_x

    _, encoded_x_stateful, decoded_x_stateful, _ = self.evaluate(
        stateful_iteration(x, state))
    encoded_x_stateless, decoded_x_stateless = self.evaluate(
        stateless_iteration(x))

    self.assertAllClose(encoded_x_stateful, encoded_x_stateless)
    self.assertAllClose(decoded_x_stateful, decoded_x_stateless) 
Example #3
Source File: run_experiment.py    From federated with Apache License 2.0
def _broadcast_encoder_fn(value):
  """Function for building encoded broadcast.

  This function decides, based on the tensor size, whether to use lossy
  compression or keep the tensor as is (identity encoder). The motivation for
  this pattern is that compressing small model weights provides only a
  negligible benefit, while lossy compression of small weights usually has a
  larger impact on the model's accuracy.

  Args:
    value: A tensor or variable to be encoded in server to client communication.

  Returns:
    A `te.core.SimpleEncoder`.
  """
  # TODO(b/131681951): We cannot use .from_tensor(...) because it does not
  # currently support Variables.
  spec = tf.TensorSpec(value.shape, value.dtype)
  if value.shape.num_elements() > 10000:
    return te.encoders.as_simple_encoder(
        te.encoders.uniform_quantization(FLAGS.broadcast_quantization_bits),
        spec)
  else:
    return te.encoders.as_simple_encoder(te.encoders.identity(), spec) 
Example #4
Source File: emnist_test.py    From federated with Apache License 2.0
def test_synthetic(self):
    client_data = emnist.get_synthetic(num_clients=4)
    self.assertLen(client_data.client_ids, 4)

    self.assertEqual(
        client_data.element_type_structure,
        collections.OrderedDict([
            ('pixels', tf.TensorSpec(shape=(28, 28), dtype=tf.float32)),
            ('label', tf.TensorSpec(shape=(), dtype=tf.int32)),
        ]))

    for client_id in client_data.client_ids:
      data = self.evaluate(
          list(client_data.create_tf_dataset_for_client(client_id)))
      images = [x['pixels'] for x in data]
      labels = [x['label'] for x in data]
      self.assertLen(labels, 10)
      self.assertCountEqual(labels, list(range(10)))
      self.assertLen(images, 10)
      self.assertEqual(images[0].shape, (28, 28))
      self.assertEqual(images[-1].shape, (28, 28)) 
Example #5
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def test_commutativity_with_sum(self):
    """Tests that encoder that commutes with sum works."""
    x_fn = lambda: tf.constant([1.0, 3.0])
    encoder = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(test_utils.TimesTwoEncodingStage()).make(),
        tf.TensorSpec.from_tensor(x_fn()))

    for num_summands in [1, 3, 7]:
      iteration = _make_iteration_function(encoder, x_fn, num_summands)
      data = self.evaluate(iteration(encoder.initial_state()))
      for i in range(num_summands):
        self.assertAllClose([1.0, 3.0], data.x[i])
        self.assertAllClose(
            [2.0, 6.0], _encoded_x_field(data.encoded_x[i], [TENSORS, T2_VALS]))
        self.assertAllClose(list(data.part_decoded_x[i].values())[0],
                            list(data.encoded_x[i].values())[0])
      self.assertAllClose(np.array([2.0, 6.0]) * num_summands,
                          list(data.summed_part_decoded_x.values())[0])
      self.assertAllClose(np.array([1.0, 3.0]) * num_summands, data.decoded_x) 
Example #6
Source File: tff_gans.py    From federated with Apache License 2.0
def tensor_spec_for_batch(dummy_batch):
  """Returns a TensorSpec for the given batch."""
  # TODO(b/131085687): Consider common util shared with model_utils.py.
  if hasattr(dummy_batch, '_asdict'):
    dummy_batch = dummy_batch._asdict()

  def _get_tensor_spec(tensor):
    # Convert input to tensors, possibly from nested lists that need to be
    # converted to a single top-level tensor.
    tensor = tf.convert_to_tensor(tensor)
    # Remove the batch dimension and leave it unspecified.
    spec = tf.TensorSpec(
        shape=[None] + tensor.shape.dims[1:], dtype=tensor.dtype)
    return spec

  return tf.nest.map_structure(_get_tensor_spec, dummy_batch)


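A hypothetical usage sketch for tensor_spec_for_batch above, assuming a batch represented as an OrderedDict of tensors (the field names and shapes are illustrative):

import collections
import tensorflow as tf

dummy_batch = collections.OrderedDict(
    x=tf.ones([8, 784], dtype=tf.float32),  # batch of 8 flattened images
    y=tf.zeros([8, 1], dtype=tf.int64))     # batch of 8 labels

specs = tensor_spec_for_batch(dummy_batch)
# Expected result: the batch dimension is left unspecified in each spec, e.g.
# specs['x'] == tf.TensorSpec(shape=(None, 784), dtype=tf.float32)
# specs['y'] == tf.TensorSpec(shape=(None, 1), dtype=tf.int64)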
Example #7
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def test_basic_encode_decode(self):
    """Tests basic encoding and decoding works as expected."""
    x_fn = lambda: tf.random.uniform((12,))
    encoder = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.PlusOneOverNEncodingStage()).make(),
        tf.TensorSpec.from_tensor(x_fn()))

    num_summands = 3
    iteration = _make_iteration_function(encoder, x_fn, num_summands)
    state = encoder.initial_state()

    for i in range(1, 5):
      data = self.evaluate(iteration(state))
      for j in range(num_summands):
        self.assertAllClose(
            data.x[j] + 1 / i,
            _encoded_x_field(data.encoded_x[j], [TENSORS, PN_VALS]))
      self.assertEqual((i,), data.initial_state)
      self.assertEqual((i + 1,), data.updated_state)
      state = data.updated_state 
Example #8
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def test_none_state_equal_to_initial_state(self):
    """Tests that not providing state is the same as initial_state."""
    x_fn = lambda: tf.constant(1.0)
    encoder = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.PlusOneOverNEncodingStage()).make(),
        tf.TensorSpec.from_tensor(x_fn()))

    num_summands = 3
    stateful_iteration = _make_iteration_function(encoder, x_fn, num_summands)
    state = encoder.initial_state()
    stateless_iteration = _make_stateless_iteration_function(
        encoder, x_fn, num_summands)

    stateful_data = self.evaluate(stateful_iteration(state))
    stateless_data = self.evaluate(stateless_iteration())

    self.assertAllClose(stateful_data.encoded_x, stateless_data.encoded_x)
    self.assertAllClose(stateful_data.decoded_x, stateless_data.decoded_x) 
Example #9
Source File: simple_encoder_test.py    From model-optimization with Apache License 2.0
def test_input_signature_enforced(self):
    """Tests that encode/decode input signature is enforced."""
    x = tf.constant(1.0)
    encoder = simple_encoder.SimpleEncoder(
        core_encoder.EncoderComposer(
            test_utils.PlusOneOverNEncodingStage()).make(),
        tf.TensorSpec.from_tensor(x))

    state = encoder.initial_state()
    with self.assertRaises(ValueError):
      bad_x = tf.stack([x, x])
      encoder.encode(bad_x, state)
    with self.assertRaises(ValueError):
      bad_state = state + (x,)
      encoder.encode(x, bad_state)
    encoded_x, _ = encoder.encode(x, state)
    with self.assertRaises(ValueError):
      bad_encoded_x = dict(encoded_x)
      bad_encoded_x.update({'x': x})
      encoder.decode(bad_encoded_x) 
Example #10
Source File: py_utils.py    From model-optimization with Apache License 2.0
def assert_compatible(spec, value):
  """Asserts that values are compatible with given specs.

  Args:
    spec: A structure compatible with `tf.nest`, with `tf.TensorSpec` values.
    value: A collection of values that should be compatible with `spec`. Must be
      the same structure as `spec`.

  Raises:
    TypeError: If `spec` does not contain only `tf.TensorSpec` objects.
    ValueError: If the provided `value` is not compatible with `spec`.
  """

  def validate_spec(s, v):
    if not isinstance(s, tf.TensorSpec):
      raise TypeError('Each value in `spec` must be a tf.TensorSpec.')
    return s.is_compatible_with(v)

  compatible = tf.nest.map_structure(validate_spec, spec, value)
  if not all(tf.nest.flatten(compatible)):
    raise ValueError('The provided value is not compatible with spec.') 
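A hypothetical usage sketch for the assert_compatible helper above; the spec structure and values are illustrative:

import tensorflow as tf

specs = {
    'w': tf.TensorSpec(shape=[2], dtype=tf.float32),
    'b': tf.TensorSpec(shape=[], dtype=tf.float32),
}

# Compatible: same structure, matching shapes and dtypes.
assert_compatible(specs, {'w': tf.zeros([2]), 'b': tf.constant(0.0)})

# A mismatched shape raises ValueError.
try:
  assert_compatible(specs, {'w': tf.zeros([3]), 'b': tf.constant(0.0)})
except ValueError as e:
  print(e)  # The provided value is not compatible with spec.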
Example #11
Source File: speech_set.py    From athena with Apache License 2.0
def sample_signature(self):
        return (
            {
                "input": tf.TensorSpec(
                    shape=(None, None, None, None), dtype=tf.float32
                ),
                "input_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
                "output": tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),
                "output_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
            },
        ) 
Example #12
Source File: speech_set_kaldiio.py    From athena with Apache License 2.0
def sample_signature(self):
        return (
            {
                "input": tf.TensorSpec(
                    shape=(None, None, None, None), dtype=tf.float32
                ),
                "input_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
                "output": tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),
                "output_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
            },
        ) 
Example #13
Source File: stackoverflow_dataset_test.py    From federated with Apache License 2.0
def test_test_preprocess_fn_return_dataset_element_spec(self):
    token = collections.OrderedDict(tokens=([
        'one must imagine',
    ]))
    ds = tf.data.Dataset.from_tensor_slices(token)
    test_preprocess_fn = stackoverflow_dataset.create_test_dataset_preprocess_fn(
        max_seq_len=10, vocab=['one', 'must'])
    test_preprocessed_ds = test_preprocess_fn(ds)
    self.assertEqual(test_preprocessed_ds.element_spec,
                     (tf.TensorSpec(shape=[None, 10], dtype=tf.int64),
                      tf.TensorSpec(shape=[None, 10], dtype=tf.int64))) 
Example #14
Source File: attacked_fedavg_test.py    From federated with Apache License 2.0
def _create_input_spec():
  return _Batch(
      x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
      y=tf.TensorSpec(dtype=tf.int64, shape=[None, 1])) 
Example #15
Source File: p13n_utils_test.py    From federated with Apache License 2.0
def _model_fn():
  """Constructs a linear model with weights initialized to be zeros."""
  inputs = tf.keras.Input(shape=(2,))  # feature dim = 2
  outputs = tf.keras.layers.Dense(1, kernel_initializer='zeros')(inputs)
  keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
  input_spec = collections.OrderedDict([
      ('x', tf.TensorSpec([None, 2], dtype=tf.float32)),
      ('y', tf.TensorSpec([None, 1], dtype=tf.float32))
  ])
  return tff.learning.from_keras_model(
      keras_model=keras_model,
      input_spec=input_spec,
      loss=tf.keras.losses.MeanSquaredError(),
      metrics=[tf.keras.metrics.MeanAbsoluteError()]) 
Example #16
Source File: attacked_fedavg_test.py    From federated with Apache License 2.0
def input_spec(self):
    return collections.OrderedDict(
        x=tf.TensorSpec([None, 784], tf.float32),
        y=tf.TensorSpec([None, 1], tf.int64)) 
Example #17
Source File: iterative_process_builder_test.py    From federated with Apache License 2.0
def _get_input_spec():
  input_spec = _Batch(
      x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
      y=tf.TensorSpec([None, 1], dtype=tf.int64))
  return input_spec 
Example #18
Source File: hdf5_client_data_test.py    From federated with Apache License 2.0
def test_element_type_structure(self):
    expected_structure = {
        'w': tf.TensorSpec(shape=[], dtype=tf.int64),
        'x': tf.TensorSpec(shape=[2], dtype=tf.int32),
        'y': tf.TensorSpec(shape=[], dtype=tf.float32),
        'z': tf.TensorSpec(shape=[], dtype=tf.string),
    }
    client_data = hdf5_client_data.HDF5ClientData(
        HDF5ClientDataTest.test_data_filepath)
    self.assertDictEqual(client_data.element_type_structure, expected_structure) 
Example #19
Source File: speech_recognition_kaldiio.py    From athena with Apache License 2.0
def sample_signature(self):
        dim = self.audio_featurizer.dim
        nc = self.audio_featurizer.num_channels
        return (
            {
                "input": tf.TensorSpec(shape=(None, None, dim, nc), dtype=tf.float32),
                "input_length": tf.TensorSpec(shape=(None), dtype=tf.int32),
                "output_length": tf.TensorSpec(shape=(None), dtype=tf.int32),
                "output": tf.TensorSpec(shape=(None, None), dtype=tf.int32),
            },
        ) 
Example #20
Source File: language_set.py    From athena with Apache License 2.0
def sample_signature(self):
        return (
            {
                "input": tf.TensorSpec(shape=(None, None), dtype=tf.int32),
                "input_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
                "output": tf.TensorSpec(shape=(None, None), dtype=tf.int32),
                "output_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
            },
        ) 
Example #21
Source File: dense_image_warp_test.py    From addons with Apache License 2.0
def _check_interpolation_correctness(
    shape, image_type, flow_type, call_with_unknown_shapes=False, num_probes=5
):
    """Interpolate, and then assert correctness for a few query
    locations."""
    low_precision = image_type == "float16" or flow_type == "float16"
    rand_image, rand_flows = _get_random_image_and_flows(shape, image_type, flow_type)

    if call_with_unknown_shapes:
        fn = dense_image_warp.get_concrete_function(
            tf.TensorSpec(shape=None, dtype=image_type),
            tf.TensorSpec(shape=None, dtype=flow_type),
        )
        interp = fn(
            image=tf.convert_to_tensor(rand_image),
            flow=tf.convert_to_tensor(rand_flows),
        )
    else:
        interp = dense_image_warp(
            image=tf.convert_to_tensor(rand_image),
            flow=tf.convert_to_tensor(rand_flows),
        )

    for _ in range(num_probes):
        batch_index = np.random.randint(0, shape[0])
        y_index = np.random.randint(0, shape[1])
        x_index = np.random.randint(0, shape[2])

        _assert_correct_interpolation_value(
            rand_image,
            rand_flows,
            interp,
            batch_index,
            y_index,
            x_index,
            low_precision=low_precision,
        ) 
Example #22
Source File: dense_image_warp_test.py    From addons with Apache License 2.0
def test_unknown_shape():
    query_points = tf.constant(
        [[0.0, 0.0], [0.0, 1.0], [0.5, 2.0], [1.5, 1.5]], shape=[1, 4, 2]
    )
    fn = interpolate_bilinear.get_concrete_function(
        tf.TensorSpec(shape=None, dtype=tf.float32),
        tf.TensorSpec(shape=None, dtype=tf.float32),
    )
    for shape in (2, 4, 3, 6), (6, 2, 4, 3), (1, 2, 4, 3):
        image = tf.ones(shape=shape)
        res = fn(image, query_points)
        assert res.shape == (shape[0], 4, shape[3]) 
Example #23
Source File: transform_ops_test.py    From addons with Apache License 2.0
def test_unknown_shape():
    fn = tf.function(transform_ops.rotate).get_concrete_function(
        tf.TensorSpec(shape=None, dtype=tf.float32), 0
    )
    for shape in (2, 4), (2, 4, 3), (1, 2, 4, 3):
        image = tf.ones(shape=shape)
        np.testing.assert_equal(image.numpy(), fn(image).numpy())


# TODO: Parameterize on dtypes 
Example #24
Source File: transform_ops_test.py    From addons with Apache License 2.0
def test_transform_unknown_shape():
    fn = tf.function(transform_ops.transform).get_concrete_function(
        tf.TensorSpec(shape=None, dtype=tf.float32), [1, 0, 0, 0, 1, 0, 0, 0]
    )
    for shape in (2, 4), (2, 4, 3), (1, 2, 4, 3):
        image = tf.ones(shape=shape)
        np.testing.assert_equal(image.numpy(), fn(image).numpy()) 
Example #25
Source File: interpolate_spline_test.py    From addons with Apache License 2.0
def test_fully_unspecified_shape():
    """Ensure that erreor is thrown when input/output dim unspecified."""
    tp = _QuadraticPlusSinProblemND()
    (query_points, _, train_points, train_values) = tp.get_problem(dtype="float64")

    feature_dim = query_points.shape[-1]
    value_dim = train_values.shape[-1]

    order = 1
    reg_weight = 0.01

    # Get concrete functions such that the batch size, number of train points,
    # and number of query points are not known at graph construction time.
    with pytest.raises(ValueError):
        tf.function(interpolate_spline).get_concrete_function(
            tf.TensorSpec(shape=[None, None, None], dtype=train_points.dtype),
            tf.TensorSpec(shape=[None, None, value_dim], dtype=train_values.dtype),
            tf.TensorSpec(shape=[None, None, feature_dim], dtype=query_points.dtype),
            order,
            reg_weight,
        )

    with pytest.raises(ValueError):
        tf.function(interpolate_spline).get_concrete_function(
            tf.TensorSpec(shape=[None, None, feature_dim], dtype=train_points.dtype),
            tf.TensorSpec(shape=[None, None, None], dtype=train_values.dtype),
            tf.TensorSpec(shape=[None, None, feature_dim], dtype=query_points.dtype),
            order,
            reg_weight,
        ) 
Example #26
Source File: filters_test.py    From addons with Apache License 2.0
def test_unknown_shape_median(shape):
    fn = median_filter2d.get_concrete_function(
        tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
        padding="CONSTANT",
        constant_values=1.0,
    )

    image = tf.ones(shape=shape)
    np.testing.assert_equal(image.numpy(), fn(image).numpy()) 
Example #27
Source File: filters_test.py    From addons with Apache License 2.0
def test_none_channels_median():
    # 3-D image
    fn = median_filter2d.get_concrete_function(
        tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
    )
    fn(tf.ones(shape=(3, 3, 1)))
    fn(tf.ones(shape=(3, 3, 3)))

    # 4-D image
    fn = median_filter2d.get_concrete_function(
        tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
    )
    fn(tf.ones(shape=(1, 3, 3, 1)))
    fn(tf.ones(shape=(1, 3, 3, 3))) 
Example #28
Source File: filters_test.py    From addons with Apache License 2.0
def test_unknown_shape_mean(shape):
    fn = mean_filter2d.get_concrete_function(
        tf.TensorSpec(shape=None, dtype=tf.dtypes.float32),
        padding="CONSTANT",
        constant_values=1.0,
    )

    image = tf.ones(shape=shape)
    np.testing.assert_equal(image.numpy(), fn(image).numpy()) 
Example #29
Source File: filters_test.py    From addons with Apache License 2.0
def test_none_channels_mean():
    # 3-D image
    fn = mean_filter2d.get_concrete_function(
        tf.TensorSpec(dtype=tf.dtypes.float32, shape=(3, 3, None))
    )
    fn(tf.ones(shape=(3, 3, 1)))
    fn(tf.ones(shape=(3, 3, 3)))

    # 4-D image
    fn = mean_filter2d.get_concrete_function(
        tf.TensorSpec(dtype=tf.dtypes.float32, shape=(1, 3, 3, None))
    )
    fn(tf.ones(shape=(1, 3, 3, 1)))
    fn(tf.ones(shape=(1, 3, 3, 3))) 
Example #30
Source File: utils_test.py    From addons with Apache License 2.0
def test_from_4D_image_with_unknown_shape():
    for shape in (2, 4), (2, 4, 1), (1, 2, 4, 1):
        exp = tf.ones(shape=shape)
        fn = tf.function(img_utils.from_4D_image).get_concrete_function(
            tf.TensorSpec(shape=None, dtype=tf.float32), tf.size(shape)
        )
        res = fn(tf.ones(shape=(1, 2, 4, 1)), tf.size(shape))
        np.testing.assert_equal(exp.numpy(), res.numpy())