Python tensorflow.Saver() Examples
The following are 19 code examples of tensorflow.Saver().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: core.py From muffnn with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _build_tf_graph(self):
    """Build the TF graph, setup model saving and setup a TF session.

    Notes
    -----
    This method initializes a TF Saver and a TF Session via

    ```python
    self._saver = tf.train.Saver()
    self._session = tf.Session()
    ```

    These calls are made after `self._set_up_graph()` is called. See the
    main class docs for how to properly call this method from a child
    class.
    """
    # Subclass hook: builds the model-specific ops before saver/session.
    self._set_up_graph()
    self._saver = tf.train.Saver()
    self._session = tf.Session()
Example #2
Source File: learn_path_embeddings.py From Gun-Detector with Apache License 2.0 | 5 votes |
def epoch_completed(model, session, epoch, epoch_loss, val_instances,
                    val_labels, saver, save_path, best_f1):
  """Runs every time an epoch completes.

  Print the performance on the validation set, and update the saved model if
  its performance is better than on the previous ones. If the performance
  dropped, tell the training to stop.

  Args:
    model: The currently trained path-based model.
    session: The current TensorFlow session.
    epoch: The epoch number.
    epoch_loss: The current epoch loss.
    val_instances: The validation set instances (evaluation between epochs).
    val_labels: The validation set labels (for evaluation between epochs).
    saver: tf.Saver object
    save_path: Where to save the model.
    best_f1: the best F1 achieved so far.

  Returns:
    The F1 achieved on the validation set.
  """
  import os

  # Evaluate on the validation set.
  val_pred = model.predict(session, val_instances)
  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
      val_labels, val_pred, average='weighted')
  print(
      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
          epoch + 1, model.hparams.num_epochs, epoch_loss, precision, recall,
          f1))

  if f1 > best_f1:
    # os.path.join handles save_path with or without a trailing separator;
    # plain 'save_path + "best.ckpt"' silently produced e.g. 'dirbest.ckpt'
    # when the caller passed a directory without a trailing '/'.
    save_filename = os.path.join(save_path, 'best.ckpt')
    print('Saving model in: %s' % save_filename)
    saver.save(session, save_filename)
    print('Model saved in file: %s' % save_filename)
  return f1
Example #3
Source File: learn_path_embeddings.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def epoch_completed(model, session, epoch, epoch_loss, val_instances,
                    val_labels, saver, save_path, best_f1):
  """Runs every time an epoch completes.

  Print the performance on the validation set, and update the saved model if
  its performance is better than on the previous ones. If the performance
  dropped, tell the training to stop.

  Args:
    model: The currently trained path-based model.
    session: The current TensorFlow session.
    epoch: The epoch number.
    epoch_loss: The current epoch loss.
    val_instances: The validation set instances (evaluation between epochs).
    val_labels: The validation set labels (for evaluation between epochs).
    saver: tf.Saver object
    save_path: Where to save the model.
    best_f1: the best F1 achieved so far.

  Returns:
    The F1 achieved on the validation set.
  """
  import os

  # Evaluate on the validation set.
  val_pred = model.predict(session, val_instances)
  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
      val_labels, val_pred, average='weighted')
  print(
      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
          epoch + 1, model.hparams.num_epochs, epoch_loss, precision, recall,
          f1))

  if f1 > best_f1:
    # os.path.join handles save_path with or without a trailing separator;
    # plain 'save_path + "best.ckpt"' silently produced e.g. 'dirbest.ckpt'
    # when the caller passed a directory without a trailing '/'.
    save_filename = os.path.join(save_path, 'best.ckpt')
    print('Saving model in: %s' % save_filename)
    saver.save(session, save_filename)
    print('Model saved in file: %s' % save_filename)
  return f1
Example #4
Source File: rnnlm_ops.py From tf_rnnlm with Apache License 2.0 | 5 votes |
def saver(self):
    """Return the tf.train.Saver for this model, building it on first access.

    The saver is memoized in ``self._saver`` so repeated calls reuse the
    same instance.
    """
    cached = self._saver
    if cached is None:
        cached = tf.train.Saver()
        self._saver = cached
    return cached
Example #5
Source File: train_utils.py From models with Apache License 2.0 | 5 votes |
def __init__(self,
             max_number_of_steps=0,
             num_updates_per_observation=1,
             num_collect_per_update=1,
             num_collect_per_meta_update=1,
             log_every_n_steps=1,
             policy_save_fn=None,
             save_policy_every_n_steps=0,
             should_stop_early=None):
  """Returns a function that is executed at each step of slim training.

  Args:
    max_number_of_steps: Optional maximum number of train steps to take.
    num_updates_per_observation: Number of updates per observation.
    num_collect_per_update: Number of collect steps per update.
    num_collect_per_meta_update: Number of collect steps per meta update.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
    policy_save_fn: A tf.Saver().save function to save the policy.
    save_policy_every_n_steps: How frequently to save the policy.
    should_stop_early: Optional hook to report whether training should stop.

  Raises:
    ValueError: If policy_save_fn is not provided when
      save_policy_every_n_steps > 0.
  """
  # A saver is mandatory once periodic policy saving is requested.
  if save_policy_every_n_steps and policy_save_fn is None:
    raise ValueError(
        'policy_save_fn is required when save_policy_every_n_steps > 0')
  self.max_number_of_steps = max_number_of_steps
  self.num_updates_per_observation = num_updates_per_observation
  self.num_collect_per_update = num_collect_per_update
  self.num_collect_per_meta_update = num_collect_per_meta_update
  self.log_every_n_steps = log_every_n_steps
  self.policy_save_fn = policy_save_fn
  self.save_policy_every_n_steps = save_policy_every_n_steps
  self.should_stop_early = should_stop_early
  self.last_global_step_val = 0
  # Set later by the training loop before the hook is invoked.
  self.train_op_fn = None
  self.collect_and_train_fn = None
  tf.logging.info('Training for %d max_number_of_steps',
                  self.max_number_of_steps)
Example #6
Source File: db_interface.py From tfutils with MIT License | 5 votes |
def tf_saver(self):
    """Build the tf.train.Saver on first use and cache it on the instance.

    The saver covers ``self.var_list`` and forwards any extra
    ``tfsaver_args`` / ``tfsaver_kwargs`` configured on the object.
    """
    if not hasattr(self, '_tf_saver'):
        built = tf.train.Saver(
            var_list=self.var_list,
            *self.tfsaver_args,
            **self.tfsaver_kwargs)
        self._tf_saver = built
    return self._tf_saver
Example #7
Source File: model.py From hart with GNU General Public License v3.0 | 5 votes |
def saver(self, **kwargs):
    """Returns a Saver for all (trainable and model) variables used by the model.

    Model variables include e.g. moving mean and average in BatchNorm.

    :return: tf.Saver
    """
    saveable_vars = self.vars
    return tf.train.Saver(saveable_vars, **kwargs)
Example #8
Source File: learn_path_embeddings.py From models with Apache License 2.0 | 5 votes |
def epoch_completed(model, session, epoch, epoch_loss, val_instances,
                    val_labels, saver, save_path, best_f1):
  """Runs every time an epoch completes.

  Print the performance on the validation set, and update the saved model if
  its performance is better on the previous ones. If the performance dropped,
  tell the training to stop.

  Args:
    model: The currently trained path-based model.
    session: The current TensorFlow session.
    epoch: The epoch number.
    epoch_loss: The current epoch loss.
    val_instances: The validation set instances (evaluation between epochs).
    val_labels: The validation set labels (for evaluation between epochs).
    saver: tf.Saver object
    save_path: Where to save the model.
    best_f1: the best F1 achieved so far.

  Returns:
    The F1 achieved on the training set.
  """
  # Score the model on the held-out validation instances.
  val_pred = model.predict(session, val_instances)
  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
      val_labels, val_pred, average='weighted')
  print(
      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
          epoch + 1, model.hparams.num_epochs, epoch_loss, precision, recall,
          f1))

  improved = f1 > best_f1
  if improved:
    # Checkpoint only when this epoch beats the best F1 so far.
    ckpt_path = os.path.join(save_path, 'best.ckpt')
    print('Saving model in: %s' % ckpt_path)
    saver.save(session, ckpt_path)
    print('Model saved in file: %s' % ckpt_path)
  return f1
Example #9
Source File: learn_path_embeddings.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def epoch_completed(model, session, epoch, epoch_loss, val_instances,
                    val_labels, saver, save_path, best_f1):
  """Runs every time an epoch completes.

  Print the performance on the validation set, and update the saved model if
  its performance is better than on the previous ones. If the performance
  dropped, tell the training to stop.

  Args:
    model: The currently trained path-based model.
    session: The current TensorFlow session.
    epoch: The epoch number.
    epoch_loss: The current epoch loss.
    val_instances: The validation set instances (evaluation between epochs).
    val_labels: The validation set labels (for evaluation between epochs).
    saver: tf.Saver object
    save_path: Where to save the model.
    best_f1: the best F1 achieved so far.

  Returns:
    The F1 achieved on the validation set.
  """
  import os

  # Evaluate on the validation set.
  val_pred = model.predict(session, val_instances)
  precision, recall, f1, _ = metrics.precision_recall_fscore_support(
      val_labels, val_pred, average='weighted')
  print(
      'Epoch: %d/%d, Loss: %f, validation set: P: %.3f, R: %.3f, F1: %.3f\n' % (
          epoch + 1, model.hparams.num_epochs, epoch_loss, precision, recall,
          f1))

  if f1 > best_f1:
    # os.path.join handles save_path with or without a trailing separator;
    # plain 'save_path + "best.ckpt"' silently produced e.g. 'dirbest.ckpt'
    # when the caller passed a directory without a trailing '/'.
    save_filename = os.path.join(save_path, 'best.ckpt')
    print('Saving model in: %s' % save_filename)
    saver.save(session, save_filename)
    print('Model saved in file: %s' % save_filename)
  return f1
Example #10
Source File: train_utils.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def __init__(self,
             max_number_of_steps=0,
             num_updates_per_observation=1,
             num_collect_per_update=1,
             num_collect_per_meta_update=1,
             log_every_n_steps=1,
             policy_save_fn=None,
             save_policy_every_n_steps=0,
             should_stop_early=None):
  """Returns a function that is executed at each step of slim training.

  Args:
    max_number_of_steps: Optional maximum number of train steps to take.
    num_updates_per_observation: Number of updates per observation.
    num_collect_per_update: Number of collect steps per update.
    num_collect_per_meta_update: Number of collect steps per meta update.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
    policy_save_fn: A tf.Saver().save function to save the policy.
    save_policy_every_n_steps: How frequently to save the policy.
    should_stop_early: Optional hook to report whether training should stop.

  Raises:
    ValueError: If policy_save_fn is not provided when
      save_policy_every_n_steps > 0.
  """
  # A saver is mandatory once periodic policy saving is requested.
  if save_policy_every_n_steps and policy_save_fn is None:
    raise ValueError(
        'policy_save_fn is required when save_policy_every_n_steps > 0')
  self.max_number_of_steps = max_number_of_steps
  self.num_updates_per_observation = num_updates_per_observation
  self.num_collect_per_update = num_collect_per_update
  self.num_collect_per_meta_update = num_collect_per_meta_update
  self.log_every_n_steps = log_every_n_steps
  self.policy_save_fn = policy_save_fn
  self.save_policy_every_n_steps = save_policy_every_n_steps
  self.should_stop_early = should_stop_early
  self.last_global_step_val = 0
  # Set later by the training loop before the hook is invoked.
  self.train_op_fn = None
  self.collect_and_train_fn = None
  tf.logging.info('Training for %d max_number_of_steps',
                  self.max_number_of_steps)
Example #11
Source File: model_adapter.py From object_detection_kitti with Apache License 2.0 | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #12
Source File: model_adapter.py From multilabel-image-classification-tensorflow with MIT License | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #13
Source File: model_adapter.py From models with Apache License 2.0 | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #14
Source File: model_adapter.py From g-tensorflow-models with Apache License 2.0 | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #15
Source File: model_adapter.py From object_detection_with_tensorflow with MIT License | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #16
Source File: TransE.py From KagNet with MIT License | 4 votes |
def init_predict(hs, ts, rs):
    '''
    # (1) Set import files and OpenKE will automatically load models via tf.Saver().
    con = Config()
    # con.set_in_path("OpenKE/benchmarks/FB15K/")
    con.set_in_path("openke_data/")
    # con.set_test_link_prediction(True)
    con.set_test_triple_classification(True)
    con.set_work_threads(8)
    con.set_dimension(100)
    # con.set_import_files("OpenKE/res/model.vec.tf")
    con.set_import_files("openke_data/embs/glove_initialized/glove.transe.SGD.pt")
    con.init()
    con.set_model(models.TransE)
    con.test()
    con.predict_triple(hs, ts, rs)
    # con.show_link_prediction(2,1)
    # con.show_triple_classification(2,1,3)
    '''
    # (2) Read model parameters from json files and manually load parameters.
    con = Config()
    con.set_in_path("./openke_data/")
    con.set_test_triple_classification(True)
    con.set_work_threads(8)
    con.set_dimension(100)
    con.init()
    con.set_model(models.TransE)
    # Context manager guarantees the file is closed even if json.loads raises;
    # the original open()/read()/close() leaked the handle on error.
    with open("./openke_data/embs/glove_initialized/glove.transe.SGD.vec.json",
              "r") as f:
        content = json.loads(f.read())
    con.set_parameters(content)
    con.test()

    # (3) Manually load models via tf.Saver().
    # con = config.Config()
    # con.set_in_path("./benchmarks/FB15K/")
    # con.set_test_flag(True)
    # con.set_work_threads(4)
    # con.set_dimension(50)
    # con.init()
    # con.set_model(models.TransE)
    # con.import_variables("./res/model.vec.tf")
    # con.test()
Example #17
Source File: db_interface.py From tfutils with MIT License | 4 votes |
def get_restore_vars(self, save_file):
    """Create the `var_list` init argument to tf.Saver from save_file.

    Extracts the subset of variables from tf.global_variables that match the
    name and shape of variables saved in the checkpoint file, and returns
    these as a list of variables to restore.

    To support multi-model training, a model prefix is prepended to all
    tf global_variable names, although this prefix is stripped from all
    variables before they are saved to a checkpoint.

    Args:
        save_file: path of tf.train.Saver checkpoint.

    Returns:
        dict: checkpoint variables.
    """
    reader = tf.train.NewCheckpointReader(save_file)
    var_shapes = reader.get_variable_to_shape_map()

    # Map old vars from checkpoint to new vars via load_param_dict.
    log.info('Saved vars and shapes:\n' + str(var_shapes))

    # Specify which vars are to be restored vs. reinitialized.
    all_vars = self.var_list
    restore_vars = {name: var for name, var in all_vars.items()
                    if name in var_shapes}

    if self.load_param_dict:
        # Associate checkpoint names with actual graph variables.
        for ckpt_var_name, curr_var_name in self.load_param_dict.items():
            if curr_var_name in all_vars:
                restore_vars[ckpt_var_name] = all_vars[curr_var_name]

    restore_vars = self.filter_var_list(restore_vars)

    if not self.restore_global_step:
        # Default of None avoids a KeyError when 'global_step' is absent
        # (e.g. not in the checkpoint, or removed by filter_var_list above).
        restore_vars.pop('global_step', None)

    # These variables are stored in the checkpoint,
    # but do not appear in the current graph.
    in_ckpt_not_in_graph = [
        name for name in var_shapes
        if name not in all_vars
        and not any(name.endswith(s) for s in OPTIMIZER_NAMES)]
    log.info('Vars in ckpt, not in graph:\n' + str(in_ckpt_not_in_graph))

    # Ensure the vars to be restored have the correct shape.
    var_list = {}
    for name, var in restore_vars.items():
        var_shape = var.get_shape().as_list()
        if var_shape == var_shapes[name]:
            var_list[name] = var
        else:
            log.info('Shape mismatch for %s' % name
                     + str(var_shape)
                     + str(var_shapes[name]))
    return var_list
Example #18
Source File: model_adapter.py From Gun-Detector with Apache License 2.0 | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)
Example #19
Source File: model_adapter.py From yolo_v2 with Apache License 2.0 | 4 votes |
def __init__(self, make_loss_and_init_fn):
  """Wraps a model in the Problem interface.

  make_loss_and_init argument is a callable that returns a tuple of two other
  callables as follows.

  The first will construct most of the graph and return the problem loss. It
  is essential that this graph contains the totality of the model's variables,
  but none of its queues.

  The second will construct the model initialization graph given a list of
  parameters and return a callable that is passed an instance of tf.Session,
  and should initialize the model's parameters.

  An example of such a function would look like this:

  ```python
  def make_loss_and_init_fn():
    inputs = queued_reader()

    def make_loss():
      return create_model_with_variables(inputs)

    def make_init_fn(parameters):
      saver = tf.Saver(parameters)
      def init_fn(sess):
        saver.restore(sess, ...)
      return init_fn

    return make_loss, make_init_fn
  ```

  Args:
    make_loss_and_init_fn: a callable, as described above
  """
  make_loss_fn, make_init_fn = make_loss_and_init_fn()

  self.make_loss_fn = make_loss_fn
  self.parameters, self.constants = _get_variables(make_loss_fn)

  if make_init_fn is not None:
    init_fn = make_init_fn(self.parameters + self.constants)
  else:
    # NOTE(review): tf.initialize_variables is deprecated in favor of
    # tf.variables_initializer in newer TF releases — confirm the targeted
    # TF version before changing.
    init_op = tf.initialize_variables(self.parameters + self.constants)
    init_fn = lambda sess: sess.run(init_op)

  tf.logging.info("ModelAdapter parameters: %s",
                  [op.name for op in self.parameters])
  tf.logging.info("ModelAdapter constants: %s",
                  [op.name for op in self.constants])

  super(ModelAdapter, self).__init__(
      [], random_seed=None, noise_stdev=0.0, init_fn=init_fn)