Python tensorflow.python.training.training_util.write_graph() Examples
The following are 14 code examples of tensorflow.python.training.training_util.write_graph(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.training.training_util, or try the search function.
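Before the examples, here is a minimal, self-contained sketch of what write_graph() does. It assumes TensorFlow 1.x graph mode, where tf.train.write_graph exposes the same helper; the output path is a placeholder.

import tensorflow as tf

# Build a trivial graph to serialize.
g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")
    b = tf.constant(2.0, name="b")
    tf.add(a, b, name="sum")

# Writes a human-readable GraphDef to /tmp/write_graph_demo/graph.pbtxt;
# the directory is created if it does not exist.
tf.train.write_graph(g.as_graph_def(add_shapes=True),
                     "/tmp/write_graph_demo", "graph.pbtxt")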
Example #1
Source File: basic_session_run_hooks.py From lambda-packs with MIT License | 6 votes |
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks to change graph and
    # add variables in begin. Graph is finalized after all begin calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
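The snippet above is the first-step branch of TensorFlow's CheckpointSaverHook: graph.pbtxt and the MetaGraphDef are emitted once, on the first before_run() call. A hedged usage sketch (public TF 1.x API; the checkpoint directory is a placeholder) that exercises it:

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)

# The hook writes graph.pbtxt into checkpoint_dir on the first step.
hook = tf.train.CheckpointSaverHook(checkpoint_dir="/tmp/ckpt_demo",
                                    save_steps=100)
with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    for _ in range(3):
        sess.run(train_op)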
Example #2
Source File: basic_session_run_hooks.py From keras-lambda with MIT License | 6 votes |
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks to change graph and
    # add variables in begin. Graph is finalized after all begin calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
Example #3
Source File: basic_session_run_hooks.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 6 votes |
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks to change graph and
    # add variables in begin. Graph is finalized after all begin calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
Example #4
Source File: basic_session_run_hooks.py From deep_image_model with Apache License 2.0 | 6 votes |
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # Write graph in the first call.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._saver.saver_def if self._saver else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
Example #5
Source File: async_checkpoint.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def after_create_session(self, session, coord):
  global_step = session.run(self._global_step_tensor)

  # We do write graph and saver_def at the first call of before_run.
  # We cannot do this in begin, since we let other hooks to change graph and
  # add variables in begin. Graph is finalized after all begin calls.
  def _write_graph_fn(self):
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir, "graph.pbtxt")
  self._write_graph_thread = threading.Thread(target=_write_graph_fn,
                                              args=[self])
  self._write_graph_thread.start()

  saver_def = self._get_saver().saver_def if self._get_saver() else None
  graph = ops.get_default_graph()
  meta_graph_def = meta_graph.create_meta_graph_def(
      graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
  self._summary_writer.add_graph(graph)
  self._summary_writer.add_meta_graph(meta_graph_def)
  # The checkpoint saved here is the state at step "global_step".
  self._save(session, global_step)
  self._timer.update_last_triggered_step(global_step)
Example #6
Source File: async_checkpoint.py From training_results_v0.5 with Apache License 2.0 | 6 votes |
def end(self, session):
  if self._save_thread:
    logging.info("Waiting for any pending checkpoints to finish.")
    self._save_thread.join()
  if self._write_graph_thread:
    logging.info("Waiting for any pending write_graph to finish.")
    self._write_graph_thread.join()

  last_step = session.run(self._global_step_tensor)

  # Save the last checkpoint synchronously if needed.
  if last_step != self._timer.last_triggered_step():
    self._save(session, last_step, asynchronous=False)

  for l in self._listeners:
    l.end(session, last_step)
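Examples #5 and #6 form a pair: after_create_session() starts the graph dump on a background thread so the first training step is not blocked by serialization, and end() joins that thread before shutdown. A hedged, stripped-down sketch of the same pattern outside any hook (TF 1.x assumed; the directory is a placeholder):

import threading
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    tf.constant(0, name="dummy")

def _write_graph_fn(graph_def, checkpoint_dir):
    # Runs off the main thread; serializing a large graph can be slow.
    tf.train.write_graph(graph_def, checkpoint_dir, "graph.pbtxt")

write_graph_thread = threading.Thread(
    target=_write_graph_fn,
    args=(g.as_graph_def(add_shapes=True), "/tmp/async_demo"))
write_graph_thread.start()
# ... training steps would run here ...
write_graph_thread.join()  # mirror end(): wait for the pending write_graph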
Example #7
Source File: basic_session_run_hooks.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks to change graph and
    # add variables in begin. Graph is finalized after all begin calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
Example #8
Source File: supervisor.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
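_write_graph() is invoked internally by tf.train.Supervisor on the chief worker, so callers normally never call it directly. A hedged sketch of how it gets triggered (TF 1.x; logdir is a placeholder):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()

# On the chief, managed_session() writes graph.pbtxt into logdir via
# _write_graph() and adds the graph to the summary writer.
sv = tf.train.Supervisor(logdir="/tmp/sv_demo")
with sv.managed_session() as sess:
    sess.run(global_step)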
Example #9
Source File: supervisor.py From deep_image_model with Apache License 2.0 | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
Example #10
Source File: hooks.py From NJUNMT-tf with Apache License 2.0 | 5 votes |
def before_run(self, run_context):
  """Dumps graphs and loads the checkpoint if one exists.

  Called before each call to run().

  Args:
    run_context: A `SessionRunContext` object.

  Returns: A `SessionRunArgs` object containing global_step.
  """
  # We do write graph and saver_def at the first call of before_run.
  # We cannot do this in begin, since we let other hooks to change graph and
  # add variables in begin. Graph is finalized after all begin calls.
  if self._is_chief and self._first_call:
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    # dump model details "model_analysis.txt"
    dump_model_analysis(self._checkpoint_dir)
    # dump model configs
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=self._saver.saver_def)
    if self._summary_writer is not None:
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)
    tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
    self._first_call = False
  return tf.train.SessionRunArgs(self._global_step)
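Several of these hooks pair write_graph() with meta_graph.create_meta_graph_def(). For readers on the public API, a hedged sketch of the equivalent round trip with tf.train.export_meta_graph / tf.train.import_meta_graph (TF 1.x; paths built with tempfile):

import os
import tempfile
import tensorflow as tf

export_dir = tempfile.mkdtemp()
meta_path = os.path.join(export_dir, "model.meta")

g = tf.Graph()
with g.as_default():
    tf.constant(1.0, name="c")
    # Serializes a MetaGraphDef for the current default graph.
    tf.train.export_meta_graph(filename=meta_path)

g2 = tf.Graph()
with g2.as_default():
    # Rebuilds the graph (and a Saver, if one was serialized) from disk.
    tf.train.import_meta_graph(meta_path)
    print(g2.get_tensor_by_name("c:0"))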
Example #11
Source File: supervisor.py From lambda-packs with MIT License | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
Example #12
Source File: supervisor.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
Example #13
Source File: supervisor.py From ctw-baseline with MIT License | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
Example #14
Source File: supervisor.py From keras-lambda with MIT License | 5 votes |
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True