Python tensorflow_hub.KerasLayer() Examples

The following are 30 code examples of tensorflow_hub.KerasLayer(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license. The snippets are shown without their module-level imports; they assume the usual import tensorflow as tf, import tensorflow_hub as hub, import numpy as np, and import os, and the test snippets additionally rely on helper functions defined elsewhere in their source files.
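Before the individual examples, here is a minimal sketch of the pattern they all share: wrap a TF Hub SavedModel handle in hub.KerasLayer and use the result like any other Keras layer. The feature-vector handle, input size, and ten-class head below are illustrative assumptions, not taken from any of the examples.

import tensorflow as tf
import tensorflow_hub as hub

# Load a pre-trained image feature extractor as a frozen Keras layer.
feature_extractor = hub.KerasLayer(
    "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
    input_shape=(224, 224, 3),
    trainable=False)  # Set trainable=True to fine-tune the module's weights.

# Stack it with a task-specific head, exactly as with a built-in layer.
model = tf.keras.Sequential([
    feature_extractor,
    tf.keras.layers.Dense(10, activation="softmax"),  # Hypothetical 10-class head.
])
model.summary()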
Example #1
Source File: tfhub.py    From rpi-vision with MIT License
def __init__(
        self,
        input_shape=(224, 224, 3),
        model_url='https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4',
        labels_url='https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt',
        trainable=False
    ):

        self.model_url = model_url
        self.input_shape = input_shape
        self.labels_url = labels_url

        self.model = hub.KerasLayer(
            model_url, input_shape=input_shape, trainable=trainable)
        self.classifier = tf.keras.Sequential([self.model])

        labels_filename = labels_url.split('/')[-1]
        labels_path = tf.keras.utils.get_file(labels_filename, labels_url)
        self.labels = np.array(open(labels_path).read().splitlines()) 
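A hedged usage sketch of what the wrapper above automates (its class name is not shown in the snippet, so the same steps are reproduced inline; the random array stands in for a real 224x224 RGB frame scaled to [0, 1]):

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub

classifier = tf.keras.Sequential([hub.KerasLayer(
    'https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4',
    input_shape=(224, 224, 3))])
labels_path = tf.keras.utils.get_file(
    'ImageNetLabels.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
labels = np.array(open(labels_path).read().splitlines())

frame = np.random.rand(1, 224, 224, 3).astype(np.float32)  # Stand-in for a camera frame.
logits = classifier.predict(frame)
print(labels[np.argmax(logits[0])])  # Map the top logit to an ImageNet class name.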
Example #2
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testRegularizationLoss(self, model_format):
    export_dir = os.path.join(self.get_temp_dir(), "half-plus-one")
    _dispatch_model_format(model_format, _save_half_plus_one_model,
                           _save_half_plus_one_hub_module_v1, export_dir)
    # Import the half-plus-one model into a consumer model.
    inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    imported = hub.KerasLayer(export_dir, trainable=False)
    outp = imported(inp)
    model = tf.keras.Model(inp, outp)
    # When untrainable, the layer does not contribute regularization losses.
    self.assertAllEqual(model.losses, np.array([0.], dtype=np.float32))
    # When trainable (even set after the fact), the layer forwards its losses.
    imported.trainable = True
    self.assertAllEqual(model.losses, np.array([0.0025], dtype=np.float32))
    # This can be toggled repeatedly.
    imported.trainable = False
    self.assertAllEqual(model.losses, np.array([0.], dtype=np.float32))
    imported.trainable = True
    self.assertAllEqual(model.losses, np.array([0.0025], dtype=np.float32)) 
Example #3
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_load_callable_saved_model_v2_with_signature(self, model_format,
                                                       signature, output_key,
                                                       as_dict):
    export_dir = os.path.join(self.get_temp_dir(), "plus_one_" + model_format)
    _dispatch_model_format(model_format, _save_plus_one_saved_model_v2,
                           _save_plus_one_hub_module_v1, export_dir)
    inputs, expected_outputs = 10., 11.  # Test modules perform increment op.
    layer = hub.KerasLayer(
        export_dir,
        signature=signature,
        output_key=output_key,
        signature_outputs_as_dict=as_dict)
    output = layer(inputs)
    if as_dict:
      self.assertIsInstance(output, dict)
      self.assertEqual(output["output_0"], expected_outputs)
    else:
      self.assertEqual(output, expected_outputs) 
Example #4
Source File: keras_layer_test.py    From hub with Apache License 2.0
def _output_shape_list_model_fn(self, features, labels, mode, params):
    inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    kwargs = {}
    if "output_shape" in params:
      kwargs["output_shape"] = params["output_shape"]
    imported = hub.KerasLayer(params["hub_module"], **kwargs)
    outp = imported(inp)
    model = tf.keras.Model(inp, outp)

    out_list = model(features, training=(mode == tf.estimator.ModeKeys.TRAIN))
    for j, out in enumerate(out_list):
      i = j+1  # Sample shapes count from one.
      actual_shape = out.shape.as_list()[1:]  # Without batch size.
      expected_shape = [i]*i if "output_shape" in params else [None]*i
      self.assertEqual(actual_shape, expected_shape)
    predictions = {["one", "two", "three"][i]: out_list[i] for i in range(3)}
    imported.get_config()

    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
                                      loss=None, train_op=None) 
Example #5
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testSaveModelConfig(self, save_from_keras):
    export_dir = os.path.join(self.get_temp_dir(), "half-plus-one")
    _save_half_plus_one_model(export_dir, save_from_keras=save_from_keras)

    model = tf.keras.Sequential([hub.KerasLayer(export_dir)])
    in_value = np.array([[10.]], dtype=np.float32)
    result = model(in_value).numpy()

    json_string = model.to_json()
    new_model = tf.keras.models.model_from_json(
        json_string, custom_objects={"KerasLayer": hub.KerasLayer})
    new_result = new_model(in_value).numpy()
    self.assertEqual(result, new_result) 
Example #6
Source File: utils.py    From models with Apache License 2.0
def get_encoder_from_hub(hub_module: str) -> tf.keras.Model:
  """Gets an encoder from hub."""
  input_word_ids = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(None,), dtype=tf.int32, name='input_type_ids')
  hub_layer = hub.KerasLayer(hub_module, trainable=True)
  pooled_output, sequence_output = hub_layer(
      [input_word_ids, input_mask, input_type_ids])
  return tf.keras.Model(
      inputs=[input_word_ids, input_mask, input_type_ids],
      outputs=[sequence_output, pooled_output]) 
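A hedged usage sketch for get_encoder_from_hub; the BERT handle and the sequence length of 128 are assumptions, and the dummy int32 batches only demonstrate the expected input structure (token ids, mask, segment ids):

import numpy as np

encoder = get_encoder_from_hub(
    'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2')
word_ids = np.zeros((1, 128), dtype=np.int32)  # Token ids (all padding here).
mask = np.ones((1, 128), dtype=np.int32)       # 1 for real tokens, 0 for padding.
type_ids = np.zeros((1, 128), dtype=np.int32)  # Segment ids for sentence pairs.
sequence_output, pooled_output = encoder([word_ids, mask, type_ids])
print(sequence_output.shape, pooled_output.shape)  # (1, 128, 768) and (1, 768) here.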
Example #7
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_logs_if_training_zero_variables(self):
    path = os.path.join(self.get_temp_dir(), "zero-variables")
    _save_model_with_hparams(path)
    layer = hub.KerasLayer(path, trainable=True)
    if hasattr(self, "assertLogs"):  # New in Python 3.4.
      with self.assertLogs(level="ERROR") as logs:
        layer([[10.]])
        layer([[10.]])
      self.assertLen(logs.records, 1)  # Duplicate logging is avoided.
      self.assertRegexpMatches(logs.records[0].msg, "zero trainable weights")
    else:
      # Just test that it runs at all.
      layer([[10.]])
      layer([[10.]]) 
Example #8
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_signature_trainable(self):
    export_dir = os.path.join(self.get_temp_dir(), "saved_model_v2_mini")
    _save_plus_one_saved_model_v2(export_dir, save_from_keras=False)
    layer = hub.KerasLayer(export_dir, signature="serving_default",
                           signature_outputs_as_dict=True,
                           trainable=True)
    layer.trainable = True
    with self.assertRaisesRegex(ValueError, "trainable.*=.*True.*unsupported"):
      layer(10.) 
Example #9
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_output_key_not_in_layer_outputs(self):
    export_dir = os.path.join(self.get_temp_dir(), "hub_module_v1_mini")
    _save_plus_one_hub_module_v1(export_dir)
    layer = hub.KerasLayer(export_dir, output_key="unknown")
    with self.assertRaisesRegex(
        ValueError, "KerasLayer output does not contain the output key*"):
      layer(10.) 
Example #10
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_output_is_not_dict(self):
    export_dir = os.path.join(self.get_temp_dir(), "saved_model_v2_mini")
    _save_plus_one_saved_model_v2(export_dir, save_from_keras=False)
    layer = hub.KerasLayer(export_dir, output_key="output_0")
    with self.assertRaisesRegex(
        ValueError, "Specifying `output_key` is forbidden if output type *"):
      layer(10.) 
Example #11
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_setting_both_output_key_and_as_dict(self):
    export_dir = os.path.join(self.get_temp_dir(), "saved_model_v2_mini")
    _save_plus_one_saved_model_v2(export_dir, save_from_keras=False)
    with self.assertRaisesRegex(
        ValueError, "When using a signature, either output_key or "
        "signature_outputs_as_dict=True should be set."):
      hub.KerasLayer(export_dir, signature="default",
                     signature_outputs_as_dict=True,
                     output_key="output") 
Example #12
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_saved_model_v2_with_tags(self):
    export_dir = os.path.join(self.get_temp_dir(), "saved_model_v2_mini")
    _save_plus_one_saved_model_v2(export_dir, save_from_keras=False)
    with self.assertRaises(ValueError):
      hub.KerasLayer(export_dir, signature=None, tags=["train"]) 
Example #13
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_fails_if_with_outputs_as_dict_but_no_signature(self):
    export_dir = os.path.join(self.get_temp_dir(), "saved_model_v2_mini")
    _save_plus_one_saved_model_v2(export_dir, save_from_keras=False)
    with self.assertRaisesRegex(
        ValueError,
        "signature_outputs_as_dict is only valid if specifying a signature *"):
      hub.KerasLayer(export_dir, signature_outputs_as_dict=True) 
Example #14
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_keras_layer_get_config(self, model_format, signature, output_key,
                                  as_dict):
    export_dir = os.path.join(self.get_temp_dir(), "plus_one_" + model_format)
    _dispatch_model_format(model_format, _save_plus_one_saved_model_v2,
                           _save_plus_one_hub_module_v1, export_dir)
    inputs = 10.  # Test modules perform increment op.
    layer = hub.KerasLayer(export_dir, signature=signature,
                           output_key=output_key,
                           signature_outputs_as_dict=as_dict)
    outputs = layer(inputs)
    config = layer.get_config()
    new_layer = hub.KerasLayer.from_config(_json_cycle(config))
    new_outputs = new_layer(inputs)
    self.assertEqual(outputs, new_outputs) 
Example #15
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_load_legacy_hub_module_v1_with_signature(self, model_format,
                                                    signature, output_key,
                                                    as_dict):
    export_dir = os.path.join(self.get_temp_dir(), "plus_one_" + model_format)
    _dispatch_model_format(model_format, _save_plus_one_saved_model_v2,
                           _save_plus_one_hub_module_v1, export_dir)
    inputs, expected_outputs = 10., 11.  # Test modules perform increment op.
    layer = hub.KerasLayer(export_dir, signature=signature,
                           output_key=output_key,
                           signature_outputs_as_dict=as_dict)
    output = layer(inputs)
    if as_dict:
      self.assertEqual(output, {"default": expected_outputs})
    else:
      self.assertEqual(output, expected_outputs) 
Example #16
Source File: keras_layer_test.py    From hub with Apache License 2.0
def test_load_with_defaults(self, model_format):
    export_dir = os.path.join(self.get_temp_dir(), "plus_one_" + model_format)
    _dispatch_model_format(model_format, _save_plus_one_saved_model_v2,
                           _save_plus_one_hub_module_v1, export_dir)
    inputs, expected_outputs = 10., 11.  # Test modules perform increment op.
    layer = hub.KerasLayer(export_dir)
    output = layer(inputs)
    self.assertEqual(output, expected_outputs) 
Example #17
Source File: keras_layer_test.py    From hub with Apache License 2.0
def _half_plus_one_model_fn(self, features, labels, mode, params):
    inp = features  # This estimator takes a single feature, not a dict.
    imported = hub.KerasLayer(params["hub_module"],
                              trainable=params["hub_trainable"])
    model = tf.keras.Sequential([imported])
    outp = model(inp, training=(mode == tf.estimator.ModeKeys.TRAIN))
    # https://www.tensorflow.org/alpha/guide/migration_guide#using_a_custom_model_fn
    # recommends model.get_losses_for() instead of model.losses.
    model_losses = model.get_losses_for(None) + model.get_losses_for(inp)
    regularization_loss = tf.add_n(model_losses or [0.0])
    predictions = dict(output=outp, regularization_loss=regularization_loss)

    total_loss = None
    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
      total_loss = tf.add(
          tf.compat.v1.losses.mean_squared_error(labels, outp),
          regularization_loss)

    train_op = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.002)
      train_op = optimizer.minimize(
          total_loss, var_list=model.trainable_variables,
          global_step=tf.compat.v1.train.get_or_create_global_step())

    return tf.estimator.EstimatorSpec(
        mode=mode, predictions=predictions, loss=total_loss, train_op=train_op) 
Example #18
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testGetConfigFromConfigWithHParams(self):
    if tf.__version__ == "2.0.0-alpha0":
      self.skipTest("b/127938157 broke use of default hparams")
    export_dir = os.path.join(self.get_temp_dir(), "with-hparams")
    _save_model_with_hparams(export_dir)  # Has no `save_from_keras` arg.
    layer = hub.KerasLayer(export_dir, arguments=dict(a=10.))  # Leave b=0.
    in_value = np.array([[1.], [2.], [3.]], dtype=np.float32)
    expected_result = np.array([[10.], [20.], [30.]], dtype=np.float32)
    result = layer(in_value).numpy()
    self.assertAllEqual(expected_result, result)

    config = layer.get_config()
    new_layer = hub.KerasLayer.from_config(_json_cycle(config))
    new_result = new_layer(in_value).numpy()
    self.assertAllEqual(result, new_result) 
Example #19
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testGetConfigFromConfig(self, save_from_keras):
    export_dir = os.path.join(self.get_temp_dir(), "half-plus-one")
    _save_half_plus_one_model(export_dir, save_from_keras=save_from_keras)
    layer = hub.KerasLayer(export_dir)
    in_value = np.array([[10.0]], dtype=np.float32)
    result = layer(in_value).numpy()

    config = layer.get_config()
    new_layer = hub.KerasLayer.from_config(_json_cycle(config))
    new_result = new_layer(in_value).numpy()
    self.assertEqual(result, new_result) 
Example #20
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testComputeOutputShape(self, save_from_keras):
    export_dir = os.path.join(self.get_temp_dir(), "half-plus-one")
    _save_half_plus_one_model(export_dir, save_from_keras=save_from_keras)
    layer = hub.KerasLayer(export_dir, output_shape=[1])
    self.assertEqual([10, 1],
                     layer.compute_output_shape(tuple([10, 1])).as_list())
    layer.get_config() 
Example #21
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testInputOutputDict(self, pass_output_shapes):
    """Tests use of input/output dicts."""
    # Create a SavedModel to compute sigma=[x+y, x+2y] and maybe delta=x-y.
    export_dir = os.path.join(self.get_temp_dir(), "with-dicts")
    _save_model_with_dict_input_output(export_dir)
    # Build a Model from it using Keras' "functional" API.
    x_in = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    y_in = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    dict_in = dict(x=x_in, y=y_in)
    kwargs = dict(arguments=dict(return_dict=True))  # For the SavedModel.
    if pass_output_shapes:
      # Shape inference works without this, but we pass it anyways to exercise
      # that code path and see that map_structure is called correctly
      # and calls Tensor.set_shape() with compatible values.
      kwargs["output_shape"] = dict(sigma=(2,), delta=(1,))
    imported = hub.KerasLayer(export_dir, **kwargs)
    dict_out = imported(dict_in)
    delta_out = dict_out["delta"]
    sigma_out = dict_out["sigma"]
    concat_out = tf.keras.layers.concatenate([delta_out, sigma_out])
    model = tf.keras.Model(dict_in, [delta_out, sigma_out, concat_out])
    # Test the model.
    x = np.array([[11.], [22.], [33.]], dtype=np.float32)
    y = np.array([[1.], [2.], [3.]], dtype=np.float32)
    outputs = model(dict(x=x, y=y))
    self.assertLen(outputs, 3)
    delta, sigma, concat = [x.numpy() for x in outputs]
    self.assertAllClose(delta,
                        np.array([[10.], [20.], [30.]]))
    self.assertAllClose(sigma,
                        np.array([[12., 13.], [24., 26.], [36., 39.]]))
    self.assertAllClose(
        concat,
        np.array([[10., 12., 13.], [20., 24., 26.], [30., 36., 39.]]))
    # Test round-trip through config.
    config = imported.get_config()
    new_layer = hub.KerasLayer.from_config(_json_cycle(config))
    if pass_output_shapes:
      self.assertEqual(new_layer._output_shape, imported._output_shape)
    else:
      self.assertFalse(hasattr(new_layer, "_output_shape")) 
Example #22
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testCustomAttributes(self, save_from_keras):
    """Tests custom attributes (Asset and Variable) on a SavedModel."""
    _skip_if_no_tf_asset(self)
    base_dir = os.path.join(self.get_temp_dir(), "custom-attributes")
    export_dir = os.path.join(base_dir, "model")
    temp_dir = os.path.join(base_dir, "scratch")
    _save_model_with_custom_attributes(export_dir, temp_dir,
                                       save_from_keras=save_from_keras)
    imported = hub.KerasLayer(export_dir)
    expected_outputs = imported.resolved_object.sample_output.value().numpy()
    asset_path = imported.resolved_object.sample_input.asset_path.numpy()
    with tf.io.gfile.GFile(asset_path) as f:
      inputs = tf.constant([[f.read()]], dtype=tf.string)
    actual_outputs = imported(inputs).numpy()
    self.assertAllEqual(expected_outputs, actual_outputs) 
Example #23
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testBatchNormFreezing(self, save_from_keras):
    """Tests imported batch norm with trainable=False."""
    export_dir = os.path.join(self.get_temp_dir(), "batch-norm")
    _save_batch_norm_model(export_dir, save_from_keras=save_from_keras)
    inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    imported = hub.KerasLayer(export_dir, trainable=False)
    var_beta, var_gamma, var_mean, var_variance = _get_batch_norm_vars(imported)
    dense = tf.keras.layers.Dense(
        units=1,
        kernel_initializer=tf.keras.initializers.Constant([[1.5]]),
        use_bias=False)
    outp = dense(imported(inp))
    model = tf.keras.Model(inp, outp)
    # Training the model to x --> 2*x leaves the batch norm layer entirely
    # unchanged (both trained beta&gamma and aggregated mean&variance).
    self.assertAllClose(var_beta.numpy(), np.array([0.0]))
    self.assertAllClose(var_gamma.numpy(), np.array([1.0]))
    self.assertAllClose(var_mean.numpy(), np.array([0.0]))
    self.assertAllClose(var_variance.numpy(), np.array([1.0]))
    model.compile(tf.keras.optimizers.SGD(0.1),
                  "mean_squared_error", run_eagerly=True)
    x = [[1.], [2.], [3.]]
    y = [[2*xi[0]] for xi in x]
    model.fit(np.array(x), np.array(y), batch_size=len(x), epochs=20)
    self.assertAllClose(var_beta.numpy(), np.array([0.0]))
    self.assertAllClose(var_gamma.numpy(), np.array([1.0]))
    self.assertAllClose(var_mean.numpy(), np.array([0.0]))
    self.assertAllClose(var_variance.numpy(), np.array([1.0]))
    self.assertAllClose(model(np.array(x, np.float32)), np.array(y)) 
Example #24
Source File: keras_layer_test.py    From hub with Apache License 2.0
def testBatchNormRetraining(self, save_from_keras):
    """Tests imported batch norm with trainable=True."""
    export_dir = os.path.join(self.get_temp_dir(), "batch-norm")
    _save_batch_norm_model(export_dir, save_from_keras=save_from_keras)
    inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
    imported = hub.KerasLayer(export_dir, trainable=True)
    var_beta, var_gamma, var_mean, var_variance = _get_batch_norm_vars(imported)
    outp = imported(inp)
    model = tf.keras.Model(inp, outp)
    # Retrain the imported batch norm layer on a fixed batch of inputs,
    # which has mean 12.0 and some variance of a less obvious value.
    # The module learns scale and offset parameters that achieve the
    # mapping x --> 2*x for the observed mean and variance.
    model.compile(tf.keras.optimizers.SGD(0.1),
                  "mean_squared_error", run_eagerly=True)
    x = [[11.], [12.], [13.]]
    y = [[2*xi[0]] for xi in x]
    model.fit(np.array(x), np.array(y), batch_size=len(x), epochs=100)
    self.assertAllClose(var_mean.numpy(), np.array([12.0]))
    self.assertAllClose(var_beta.numpy(), np.array([24.0]))
    self.assertAllClose(model(np.array(x, np.float32)), np.array(y))
    # Evaluating the model operates batch norm in inference mode:
    # - Batch statistics are ignored in favor of aggregated statistics,
    #   computing x --> 2*x independent of input distribution.
    # - Update ops are not run, so this doesn't change over time.
    for _ in range(100):
      self.assertAllClose(model(np.array([[10.], [20.], [30.]], np.float32)),
                          np.array([[20.], [40.], [60.]]))
    self.assertAllClose(var_mean.numpy(), np.array([12.0]))
    self.assertAllClose(var_beta.numpy(), np.array([24.0])) 
Example #25
Source File: utils.py    From hub with Apache License 2.0
def load_embedding_fn(module):
  return hub.KerasLayer(module) 
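A hedged usage sketch; the handle is an assumption, and any TF2 text-embedding SavedModel that maps a batch of strings to a batch of vectors behaves the same way:

import tensorflow as tf

embed = load_embedding_fn('https://tfhub.dev/google/nnlm-en-dim50/2')
vectors = embed(tf.constant(['hello world', 'tensorflow hub']))
print(vectors.shape)  # (2, 50) for this particular module.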
Example #26
Source File: make_image_classifier_test.py    From hub with Apache License 2.0
def testImageSizeForModuleWithVariableInputSize(self):
    model_dir = self._export_global_average_model(has_fixed_input_size=False)
    module_layer = hub.KerasLayer(model_dir)
    self.assertTupleEqual(
        (self.IMAGE_SIZE, self.IMAGE_SIZE),
        make_image_classifier_lib._image_size_for_module(module_layer,
                                                         self.IMAGE_SIZE))
    self.assertTupleEqual(
        (2 * self.IMAGE_SIZE, 2 * self.IMAGE_SIZE),
        make_image_classifier_lib._image_size_for_module(module_layer,
                                                         2 * self.IMAGE_SIZE))
    with self.assertRaisesRegex(ValueError, "none"):
      make_image_classifier_lib._image_size_for_module(module_layer, None) 
Example #27
Source File: make_image_classifier_test.py    From hub with Apache License 2.0
def testImageSizeForModuleWithFixedInputSize(self):
    model_dir = self._export_global_average_model(has_fixed_input_size=True)
    module_layer = hub.KerasLayer(model_dir)
    self.assertTupleEqual(
        (self.IMAGE_SIZE, self.IMAGE_SIZE),
        make_image_classifier_lib._image_size_for_module(module_layer, None))
    self.assertTupleEqual(
        (self.IMAGE_SIZE, self.IMAGE_SIZE),
        make_image_classifier_lib._image_size_for_module(module_layer,
                                                         self.IMAGE_SIZE))
    with self.assertRaisesRegex(ValueError, "image size"):
      make_image_classifier_lib._image_size_for_module(
          module_layer, self.IMAGE_SIZE + 1) 
Example #28
Source File: make_image_classifier_lib.py    From hub with Apache License 2.0
def make_image_classifier(tfhub_module, image_dir, hparams,
                          requested_image_size=None,
                          log_dir=None):
  """Builds and trains a TensorFLow model for image classification.

  Args:
    tfhub_module: A Python string with the handle of the Hub module.
    image_dir: A Python string naming a directory with subdirectories of images,
      one per class.
    hparams: A HParams object with hyperparameters controlling the training.
    requested_image_size: A Python integer controlling the size of images to
      feed into the Hub module. If the module has a fixed input size, this
      must be omitted or set to that same value.
    log_dir: A directory to write TensorBoard logs into (defaults to None, in
      which case no logs are written).

  Returns:
    A tuple (model, labels, train_result) with the trained Keras model, the
    list of class labels, and the result returned by train_model().
  """
  module_layer = hub.KerasLayer(tfhub_module,
                                trainable=hparams.do_fine_tuning)
  image_size = _image_size_for_module(module_layer, requested_image_size)
  print("Using module {} with image size {}".format(
      tfhub_module, image_size))
  augmentation_params = dict(
      rotation_range=hparams.rotation_range,
      horizontal_flip=hparams.horizontal_flip,
      width_shift_range=hparams.width_shift_range,
      height_shift_range=hparams.height_shift_range,
      shear_range=hparams.shear_range,
      zoom_range=hparams.zoom_range)
  train_data_and_size, valid_data_and_size, labels = _get_data_with_keras(
      image_dir, image_size, hparams.batch_size, hparams.validation_split,
      hparams.do_data_augmentation, augmentation_params)
  print("Found", len(labels), "classes:", ", ".join(labels))

  model = build_model(module_layer, hparams, image_size, len(labels))
  train_result = train_model(model, hparams, train_data_and_size,
                             valid_data_and_size, log_dir)
  return model, labels, train_result 
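A hedged sketch of calling make_image_classifier. The module handle and image directory are placeholders, and get_default_hparams() is assumed to be the library's companion helper for default hyperparameters:

module = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4'
hparams = get_default_hparams()  # Assumed helper from make_image_classifier_lib.
model, labels, train_result = make_image_classifier(
    module, '/path/to/images', hparams, requested_image_size=224)
model.save('/tmp/exported_image_classifier')  # Standard Keras SavedModel export.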
Example #29
Source File: make_image_classifier_lib.py    From hub with Apache License 2.0
def _image_size_for_module(module_layer, requested_image_size=None):
  """Returns the input image size to use with the given module.

  Args:
    module_layer: A hub.KerasLayer initialized from a Hub module expecting
      image input.
    requested_image_size: An optional Python integer with the user-requested
      height and width of the input image; or None.

  Returns:
    A tuple (height, width) of Python integers that can be used as input
    image size for the given module_layer.

  Raises:
    ValueError: If requested_image_size is set but incompatible with the module.
    ValueError: If the module does not specify a particular input size and
       requested_image_size is not set.
  """
  # TODO(b/139530454): Use a library helper function once available.
  # The stop-gap code below assumes any concrete function backing the
  # module call will accept a batch of images with the one accepted size.
  module_image_size = tuple(
      module_layer._func.__call__  # pylint:disable=protected-access
      .concrete_functions[0].structured_input_signature[0][0].shape[1:3])
  if requested_image_size is None:
    if None in module_image_size:
      raise ValueError("Must specify an image size because "
                       "the selected TF Hub module specifies none.")
    else:
      return module_image_size
  else:
    requested_image_size = tf.TensorShape(
        [requested_image_size, requested_image_size])
    assert requested_image_size.is_fully_defined()
    if requested_image_size.is_compatible_with(module_image_size):
      return tuple(requested_image_size.as_list())
    else:
      raise ValueError("The selected TF Hub module expects image size {}, "
                       "but size {} is requested".format(
                           module_image_size,
                           tuple(requested_image_size.as_list()))) 
Example #30
Source File: bert_models.py    From models with Apache License 2.0
def classifier_model(bert_config,
                     num_labels,
                     max_seq_length=None,
                     final_layer_initializer=None,
                     hub_module_url=None,
                     hub_module_trainable=True):
  """BERT classifier model in functional API style.

  Construct a Keras model for predicting `num_labels` outputs from an input with
  maximum sequence length `max_seq_length`.

  Args:
    bert_config: BertConfig or AlbertConfig, the config defines the core BERT or
      ALBERT model.
    num_labels: integer, the number of classes.
    max_seq_length: integer, the maximum input sequence length.
    final_layer_initializer: Initializer for the final dense layer. Defaults to
      a TruncatedNormal initializer.
    hub_module_url: TF-Hub path/url to a BERT module.
    hub_module_trainable: True to fine-tune layers in the hub module.

  Returns:
    Combined prediction model (words, mask, type) -> (one-hot labels)
    BERT sub-model (words, mask, type) -> (bert_outputs)
  """
  if final_layer_initializer is not None:
    initializer = final_layer_initializer
  else:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)

  if not hub_module_url:
    bert_encoder = get_transformer_encoder(
        bert_config, max_seq_length, output_range=1)
    return models.BertClassifier(
        bert_encoder,
        num_classes=num_labels,
        dropout_rate=bert_config.hidden_dropout_prob,
        initializer=initializer), bert_encoder

  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
  bert_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable)
  pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids])
  output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(
      pooled_output)

  output = tf.keras.layers.Dense(
      num_labels, kernel_initializer=initializer, name='output')(
          output)
  return tf.keras.Model(
      inputs={
          'input_word_ids': input_word_ids,
          'input_mask': input_mask,
          'input_type_ids': input_type_ids
      },
      outputs=output), bert_model