Python tensorflow.saved_model() Examples
The following are 11 code examples of the tensorflow.saved_model module.
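Before diving into the examples, here is a minimal round-trip sketch of the tf.saved_model API that most of the snippets below build on; the PlusTwo module and the /tmp/plus_two export path are illustrative assumptions, not taken from any of the listed projects.

import tensorflow as tf

# A tiny trackable module with a traced call signature (illustrative only).
class PlusTwo(tf.Module):
    @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
    def __call__(self, x):
        return x + 2.0

export_dir = "/tmp/plus_two"  # assumed scratch path
tf.saved_model.save(PlusTwo(), export_dir)

restored = tf.saved_model.load(export_dir)
print(restored(tf.constant(1.0)))  # tf.Tensor(3.0, shape=(), dtype=float32)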
Example #1
Source File: tf_savedmodel_artifact.py From BentoML with Apache License 2.0
def pack(
    self, obj, signatures=None, options=None
):  # pylint:disable=arguments-differ
    """
    Args:
        obj: Either a path(str/byte/os.PathLike) containing exported
            `tf.saved_model` files, or a Trackable object mapping to the
            `obj` parameter of `tf.saved_model.save`
        signatures:
        options:
    """
    if _is_path_like(obj):
        return _ExportedTensorflowSavedModelArtifactWrapper(self, obj)

    return _TensorflowSavedModelArtifactWrapper(self, obj, signatures, options)
Example #2
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_model_with_obscurely_shaped_list_output(export_dir):
  """Writes SavedModel with hard-to-predict output shapes."""
  def broadcast_obscurely_to(input, shape):
    """Like tf.broadcast_to(), but hostile to static shape propagation."""
    obscured_shape = tf.cast(tf.cast(shape, tf.float32)
                             # Add small random noise that gets rounded away.
                             + 0.1*tf.sin(tf.random.uniform((), -3, +3)) + 0.3,
                             tf.int32)
    return tf.broadcast_to(input, obscured_shape)

  @tf.function(
      input_signature=[tf.TensorSpec(shape=(None, 1), dtype=tf.float32)])
  def call_fn(x):
    # For each batch element x, the three outputs are
    # value x with shape (1)
    # value 2*x broadcast to shape (2,2)
    # value 3*x broadcast to shape (3,3,3)
    batch_size = tf.shape(x)[0]
    return [broadcast_obscurely_to(tf.reshape(i*x, [batch_size] + [1]*i),
                                   tf.concat([[batch_size], [i]*i], axis=0))
            for i in range(1, 4)]

  obj = tf.train.Checkpoint()
  obj.__call__ = call_fn
  tf.saved_model.save(obj, export_dir)
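A hedged usage sketch for the export above, assuming a TF2 runtime and the same export_dir: the list output can be fetched eagerly even though its shapes resist static inference.

loaded = tf.saved_model.load(export_dir)
outputs = loaded(tf.constant([[1.0], [2.0]]))  # batch of two examples
# Three tensors come back; their shapes are hard to infer statically, but at
# run time they work out to (2, 1), (2, 2, 2) and (2, 3, 3, 3).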
Example #3
Source File: fcn8s_tensorflow.py From fcn8s_tensorflow with GNU General Public License v3.0
def _load_vgg16(self):
    '''
    Loads the pretrained, convolutionalized VGG-16 model into the session.
    '''
    # 1: Load the model
    tf.saved_model.loader.load(sess=self.sess,
                               tags=[self.vgg16_tag],
                               export_dir=self.vgg16_dir)

    # 2: Return the tensors of interest
    graph = tf.get_default_graph()

    vgg16_image_input_tensor_name = 'image_input:0'
    vgg16_keep_prob_tensor_name = 'keep_prob:0'
    vgg16_pool3_out_tensor_name = 'layer3_out:0'
    vgg16_pool4_out_tensor_name = 'layer4_out:0'
    vgg16_fc7_out_tensor_name = 'layer7_out:0'

    image_input = graph.get_tensor_by_name(vgg16_image_input_tensor_name)
    keep_prob = graph.get_tensor_by_name(vgg16_keep_prob_tensor_name)
    pool3_out = graph.get_tensor_by_name(vgg16_pool3_out_tensor_name)
    pool4_out = graph.get_tensor_by_name(vgg16_pool4_out_tensor_name)
    fc7_out = graph.get_tensor_by_name(vgg16_fc7_out_tensor_name)

    return image_input, keep_prob, pool3_out, pool4_out, fc7_out
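For context, a sketch of how the returned tensors might be consumed elsewhere in the same class; `images` (an NHWC batch) is an assumed placeholder value, and feeding keep_prob=1.0 disables dropout for inference.

# Illustrative only: run the VGG-16 feature tensors in the TF1 session.
image_input, keep_prob, pool3_out, pool4_out, fc7_out = self._load_vgg16()
pool3, pool4, fc7 = self.sess.run(
    [pool3_out, pool4_out, fc7_out],
    feed_dict={image_input: images, keep_prob: 1.0})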
Example #4
Source File: tf_savedmodel_artifact.py From BentoML with Apache License 2.0
def _load_tf_saved_model(path):
    try:
        import tensorflow as tf
        from tensorflow.python.training.tracking.tracking import AutoTrackable

        TF2 = tf.__version__.startswith('2')
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact"
        )

    if TF2:
        return tf.saved_model.load(path)
    else:
        loaded = tf.compat.v2.saved_model.load(path)
        if isinstance(loaded, AutoTrackable) and not hasattr(loaded, "__call__"):
            logger.warning(
                '''Importing SavedModels from TensorFlow 1.x.
                `outputs = imported(inputs)` is not supported in bento service due to
                tensorflow API.

                Recommended usage:

                ```python
                from tensorflow.python.saved_model import signature_constants

                imported = tf.saved_model.load(path_to_v1_saved_model)
                wrapped_function = imported.signatures[
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                wrapped_function(tf.ones([]))
                ```

                See https://www.tensorflow.org/api_docs/python/tf/saved_model/load
                for details.
                '''
            )
        return loaded
Example #5
Source File: tf_savedmodel_artifact.py From BentoML with Apache License 2.0
def save(self, dst):
    try:
        import tensorflow as tf

        TF2 = tf.__version__.startswith('2')
    except ImportError:
        raise MissingDependencyException(
            "Tensorflow package is required to use TfSavedModelArtifact."
        )

    if TF2:
        return tf.saved_model.save(
            self.obj,
            self.spec._saved_model_path(dst),
            signatures=self.signatures,
            options=self.options,
        )
    else:
        if self.options:
            logger.warning(
                "Parameter 'options: %s' is ignored when using Tensorflow "
                "version 1",
                str(self.options),
            )
        return tf.saved_model.save(
            self.obj,
            self.spec._saved_model_path(dst),
            signatures=self.signatures,
        )
Example #6
Source File: keras_layer_test.py From hub with Apache License 2.0
def _skip_if_no_tf_asset(test_case):
  if not hasattr(tf.saved_model, "Asset"):
    test_case.skipTest(
        "Your TensorFlow version (%s) looks too old for creating SavedModels "
        " with assets." % tf.__version__)
Example #7
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_half_plus_one_model(export_dir, save_from_keras=False):
  """Writes Hub-style SavedModel to compute y = wx + 1, with w trainable."""
  inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
  times_w = tf.keras.layers.Dense(
      units=1,
      kernel_initializer=tf.keras.initializers.Constant([[0.5]]),
      kernel_regularizer=tf.keras.regularizers.l2(0.01),
      use_bias=False)
  plus_1 = tf.keras.layers.Dense(
      units=1,
      kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
      bias_initializer=tf.keras.initializers.Constant([1.0]),
      trainable=False)
  outp = plus_1(times_w(inp))
  model = tf.keras.Model(inp, outp)

  if save_from_keras:
    tf.saved_model.save(model, export_dir)
    return

  @tf.function(input_signature=[
      tf.TensorSpec(shape=(None, 1), dtype=tf.float32)])
  def call_fn(inputs):
    return model(inputs, training=False)

  obj = tf.train.Checkpoint()
  obj.__call__ = call_fn
  obj.variables = model.trainable_variables + model.non_trainable_variables
  assert len(obj.variables) == 3, "Expect 2 kernels and 1 bias."
  obj.trainable_variables = [times_w.kernel]
  assert(len(model.losses) == 1), "Expect 1 regularization loss."
  obj.regularization_losses = [
      tf.function(lambda: model.losses[0], input_signature=[])]
  tf.saved_model.save(obj, export_dir)
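Since this is a Hub-style export, it could plausibly be consumed through tensorflow_hub's KerasLayer, roughly as sketched below (assuming tensorflow_hub is installed and export_dir is the directory written above):

import tensorflow_hub as hub

layer = hub.KerasLayer(export_dir, trainable=True)
print(layer(tf.constant([[10.0]])))  # roughly 0.5 * 10 + 1 = 6.0
# The attached regularization_losses should surface via layer.losses.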
Example #8
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_batch_norm_model(export_dir, save_from_keras=False):
  """Writes a Hub-style SavedModel with a batch norm layer."""
  inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
  bn = tf.keras.layers.BatchNormalization(momentum=0.8)
  outp = bn(inp)
  model = tf.keras.Model(inp, outp)

  if save_from_keras:
    tf.saved_model.save(model, export_dir)
    return

  @tf.function
  def call_fn(inputs, training=False):
    return model(inputs, training=training)
  for training in (True, False):
    call_fn.get_concrete_function(tf.TensorSpec((None, 1), tf.float32),
                                  training=training)

  obj = tf.train.Checkpoint()
  obj.__call__ = call_fn
  # Test assertions pick up variables by their position here.
  obj.trainable_variables = [bn.beta, bn.gamma]
  assert _tensors_names_set(obj.trainable_variables) == _tensors_names_set(
      model.trainable_variables)
  obj.variables = [bn.beta, bn.gamma, bn.moving_mean, bn.moving_variance]
  assert _tensors_names_set(obj.variables) == _tensors_names_set(
      model.trainable_variables + model.non_trainable_variables)
  obj.regularization_losses = []
  assert not model.losses
  tf.saved_model.save(obj, export_dir)
Example #9
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_model_with_hparams(export_dir):
  """Writes a Hub-style SavedModel to compute y = ax + b with hparams a, b."""
  @tf.function(input_signature=[
      tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
      tf.TensorSpec(shape=(), dtype=tf.float32),
      tf.TensorSpec(shape=(), dtype=tf.float32)])
  def call_fn(x, a=1., b=0.):
    return tf.add(tf.multiply(a, x), b)

  obj = tf.train.Checkpoint()
  obj.__call__ = call_fn
  tf.saved_model.save(obj, export_dir)
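A usage sketch, assuming the export_dir written above; the hparams a and b are passed positionally to match the saved input signature.

imported = tf.saved_model.load(export_dir)
y = imported(tf.constant([[2.0]]), tf.constant(3.0), tf.constant(0.5))
# a*x + b = 3.0 * 2.0 + 0.5 = 6.5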
Example #10
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_model_with_custom_attributes(export_dir, temp_dir,
                                       save_from_keras=False):
  """Writes a Hub-style SavedModel with custom attributes."""
  # Calling the module parses an integer.
  f = lambda a: tf.strings.to_number(a, tf.int64)
  if save_from_keras:
    inp = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
    outp = tf.keras.layers.Lambda(f)(inp)
    model = tf.keras.Model(inp, outp)
  else:
    model = tf.train.Checkpoint()
    model.__call__ = tf.function(
        input_signature=[tf.TensorSpec(shape=(None, 1), dtype=tf.string)])(f)

  # Running on the `sample_input` file yields the `sample_output` value.
  asset_source_file_name = os.path.join(temp_dir, "number.txt")
  tf.io.gfile.makedirs(temp_dir)
  with tf.io.gfile.GFile(asset_source_file_name, "w") as f:
    f.write("12345\n")
  model.sample_input = tf.saved_model.Asset(asset_source_file_name)
  model.sample_output = tf.Variable([[12345]], dtype=tf.int64)

  # Save model and invalidate the original asset file name.
  tf.saved_model.save(model, export_dir)
  tf.io.gfile.remove(asset_source_file_name)
  return export_dir
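A sketch of reading the custom attributes back after loading (assuming the export_dir returned above); the asset file is copied into the SavedModel, so deleting the original source file does not break it.

imported = tf.saved_model.load(export_dir)
print(imported(tf.constant([["12345"]])))  # parses to [[12345]] as int64
print(imported.sample_output)              # the stored reference variable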
Example #11
Source File: keras_layer_test.py From hub with Apache License 2.0
def _save_plus_one_saved_model_v2(path, save_from_keras=False):
  """Writes Hub-style SavedModel that increments the input by one."""
  if save_from_keras:
    raise NotImplementedError()

  obj = tf.train.Checkpoint()

  @tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])
  def plus_one(x):
    return x + 1

  obj.__call__ = plus_one
  tf.saved_model.save(obj, path)
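Finally, a round-trip sketch for this last helper; /tmp/plus_one is an assumed scratch path.

_save_plus_one_saved_model_v2("/tmp/plus_one")
imported = tf.saved_model.load("/tmp/plus_one")
print(imported(tf.constant(41.0)))  # tf.Tensor(42.0, shape=(), dtype=float32)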