Python tensorflow.python.client.timeline.Timeline() Examples
The following are 30 code examples of tensorflow.python.client.timeline.Timeline(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes of the tensorflow.python.client.timeline module.
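Nearly all of the examples below follow the same pattern: run a step with tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) and a tf.RunMetadata() holder, wrap the collected step_stats in timeline.Timeline, and write the Chrome trace JSON to a file that can be opened at chrome://tracing. Here is a minimal sketch of that pattern using the TF 1.x graph-mode API; the traced op and the output file name are arbitrary placeholders, not taken from any example below.

import tensorflow as tf
from tensorflow.python.client import timeline

# Build a trivial graph to trace; any graph works the same way.
x = tf.constant(1.0)
y = x * 2.0

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()

with tf.Session() as sess:
    # Run once with tracing enabled; the step stats land in run_metadata.
    sess.run(y, options=run_options, run_metadata=run_metadata)

# Convert the collected step stats into a Chrome trace (a JSON string).
tl = timeline.Timeline(run_metadata.step_stats)
chrome_trace = tl.generate_chrome_trace_format()

# Write it out; open the file via chrome://tracing to inspect the timeline.
with open('timeline.json', 'w') as f:
    f.write(chrome_trace)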
Example #1
Source File: tf_distributions.py From MJHMC with GNU General Public License v2.0

def dEdX_val(self, X):
    with self.graph.as_default(), tf.device(self.grad_device):
        if self.prof_run:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X},
                                 options=run_options, run_metadata=run_metadata)

            tf_tl = timeline.Timeline(run_metadata.step_stats)
            ctf = tf_tl.generate_chrome_trace_format()
            log_path = expanduser('~/tmp/logs/tf_{}_grad_timeline_{}.json'.format(self.name, time.time()))
            with open(log_path, 'w') as log_file:
                log_file.write(ctf)
        else:
            grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X})
        return grad
Example #2
Source File: utils.py From deepwriting with MIT License

def create_tf_timeline(model_dir, run_metadata):
    """
    This is helpful for profiling slow Tensorflow code.

    Args:
        model_dir:
        run_metadata:

    Returns:
    """
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    timeline_file_path = os.path.join(model_dir, 'timeline.json')
    with open(timeline_file_path, 'w') as f:
        f.write(ctf)
Example #3
Source File: train.py From GraphSAINT with MIT License

def evaluate_full_batch(sess, model, minibatch_iter, many_runs_timeline, mode):
    """
    Full batch evaluation
    NOTE: HERE GCN RUNS THROUGH THE FULL GRAPH. HOWEVER, WE CALCULATE F1 SCORE
        FOR VALIDATION / TEST NODES ONLY.
    """
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    t1 = time.time()
    num_cls = minibatch_iter.class_arr.shape[-1]
    feed_dict, labels = minibatch_iter.feed_dict(mode)
    if args_global.timeline:
        preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict,
                               options=options, run_metadata=run_metadata)
        fetched_timeline = timeline.Timeline(run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        many_runs_timeline.append(chrome_trace)
    else:
        preds, loss = sess.run([model.preds, model.loss], feed_dict=feed_dict)
    node_val_test = minibatch_iter.node_val if mode == 'val' else minibatch_iter.node_test
    t2 = time.time()
    f1_scores = calc_f1(labels[node_val_test], preds[node_val_test], model.sigmoid_loss)
    return loss, f1_scores[0], f1_scores[1], (t2 - t1)
Example #4
Source File: cli.py From mayo with MIT License

def cli_profile_timeline(self):
    """Performs training profiling to produce timeline.json."""
    # TODO integrate this into Profile.
    from tensorflow.python.client import timeline
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    session = self._get_session('train')
    # run 100 iterations to warm up
    max_iterations = 100
    for i in range(max_iterations):
        log.info(
            'Running {}/{} iterations to warm up...'
            .format(i, max_iterations), update=True)
        session.run(session._train_op)
    log.info('Running the final iteration to generate timeline...')
    session.run(
        session._train_op, options=options, run_metadata=run_metadata)
    fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    chrome_trace = fetched_timeline.generate_chrome_trace_format()
    with open('timeline.json', 'w') as f:
        f.write(chrome_trace)
Example #5
Source File: utils.py From stcn with GNU General Public License v3.0

def create_tf_timeline(model_dir, run_metadata):
    """
    This is helpful for profiling slow Tensorflow code.

    Args:
        model_dir:
        run_metadata:

    Returns:
    """
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    timeline_file_path = os.path.join(model_dir, 'timeline.json')
    with open(timeline_file_path, 'w') as f:
        f.write(ctf)
Example #6
Source File: profile.py From lang2program with Apache License 2.0

def run(self, fetches, feed_dict=None):
    """like Session.run, but return a Timeline object in Chrome trace format (JSON).

    Save the json to a file, go to chrome://tracing, and open the file.

    Args:
        fetches
        feed_dict

    Returns:
        dict: a JSON dict
    """
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    super(ProfiledSession, self).run(fetches, feed_dict, options=options, run_metadata=run_metadata)

    # Create the Timeline object, and write it to a json
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    return json.loads(ctf)
Example #7
Source File: profile.py From lang2program with Apache License 2.0

def run(self, fetches, feed_dict=None):
    """like Session.run, but return a Timeline object in Chrome trace format (JSON).

    Save the json to a file, go to chrome://tracing, and open the file.

    Args:
        fetches
        feed_dict

    Returns:
        dict: a JSON dict
    """
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    super(ProfiledSession, self).run(fetches, feed_dict, options=options, run_metadata=run_metadata)

    # Create the Timeline object, and write it to a json
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    return json.loads(ctf)
Example #8
Source File: tf_distributions.py From MJHMC with GNU General Public License v2.0

def E_val(self, X):
    with self.graph.as_default(), tf.device(self.energy_device):
        if self.prof_run:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X},
                                   options=run_options, run_metadata=run_metadata)

            tf_tl = timeline.Timeline(run_metadata.step_stats)
            ctf = tf_tl.generate_chrome_trace_format()
            log_path = expanduser('~/tmp/logs/tf_{}_energy_timeline_{}.json'.format(self.name, time.time()))
            with open(log_path, 'w') as log_file:
                log_file.write(ctf)
        else:
            energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X})
        return energy
Example #9
Source File: utils.py From dip18 with GNU General Public License v3.0

def create_tf_timeline(model_dir, run_metadata):
    """
    This is helpful for profiling slow Tensorflow code.

    Args:
        model_dir:
        run_metadata:

    Returns:
    """
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    timeline_file_path = os.path.join(model_dir, 'timeline.json')
    with open(timeline_file_path, 'w') as f:
        f.write(ctf)
Example #10
Source File: util.py From am3 with Apache License 2.0

def profiled_run(sess, ops, feed_dict, is_profiling=False, log_dir=None):
    if not is_profiling:
        return sess.run(ops, feed_dict=feed_dict)
    else:
        if log_dir is None:
            raise ValueError("You need to provide a log_dir for profiling.")

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        outputs = sess.run(ops, feed_dict=feed_dict,
                           options=run_options, run_metadata=run_metadata)

        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open(os.path.join(log_dir, 'timeline.json'), 'w') as f:
            f.write(ctf)

        return outputs
Example #11
Source File: test_tf2_execution.py From checkmate with Apache License 2.0

def save_checkpoint_chrome_trace(dataset: str, model_name: str, log_base: PathLike, batch_size: int = 32):
    def trace_solver_solution(save_path: PathLike, train_ds, solver):
        import tensorflow.compat.v1 as tf1
        from tensorflow.python.client import timeline

        data_iter = train_ds.__iter__()
        data_list = [x.numpy() for x in data_iter.next()]
        with tf1.Session() as sess:
            sqrtn_fn, *_ = _build_model_via_solver(dataset, model_name, train_ds.element_spec, solver)
            out = sqrtn_fn(*[tf1.convert_to_tensor(x) for x in data_list])

            run_meta = tf1.RunMetadata()
            sess.run(tf1.global_variables_initializer())
            sess.run(out, options=tf1.RunOptions(trace_level=tf1.RunOptions.FULL_TRACE), run_metadata=run_meta)
            t1 = timeline.Timeline(run_meta.step_stats)
            lctf = t1.generate_chrome_trace_format()

        with Path(save_path).open("w") as f:
            f.write(lctf)

    log_base = Path(log_base)
    log_base.mkdir(parents=True, exist_ok=True)
    train_ds, test_ds = get_data(dataset, batch_size=batch_size)
    trace_solver_solution(log_base / "check_all.json", train_ds, solve_checkpoint_all)
    trace_solver_solution(log_base / "check_sqrtn_noap.json", train_ds, solve_chen_sqrtn_noap)
Example #12
Source File: base_runner.py From graph-partition-neural-network-samples with MIT License

def _get(self, feed_data, op_names):
    """ Get results of one mini-batch """
    ops = [self._model.ops[nn] for nn in op_names]

    # profile code
    if self._is_profile:
        op_results = self._session.run(
            ops,
            feed_dict=feed_data,
            options=self._run_options,
            run_metadata=self._run_metadata)

        trace = timeline.Timeline(self._run_metadata.step_stats)
        chrome_trace = trace.generate_chrome_trace_format()
        with open(os.path.join(self._save_dir, 'timeline.json'), 'w') as f:
            f.write(chrome_trace)
    else:
        op_results = self._session.run(ops, feed_dict=feed_data)

    results = {}
    for rr, name in zip(op_results, op_names):
        results[name] = rr

    return results
Example #13
Source File: evaluation.py From pycodesuggest with MIT License

def profile(self, session):
    evals = [self.model.cost]
    for batch in self.batcher:
        state, att_states, att_ids, att_counts = get_initial_state(self.model)
        for seq_batch in self.batcher.sequence_iterator(batch):
            feed_dict = construct_feed_dict(self.model, seq_batch, state, att_states, att_ids, att_counts)
            run_metadata = tf.RunMetadata()
            session.run(evals, feed_dict=feed_dict,
                        options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                        run_metadata=run_metadata)
            break

    from tensorflow.python.client import timeline
    trace = timeline.Timeline(step_stats=run_metadata.step_stats)
    trace_file = open('timeline.ctf.json', 'w')
    trace_file.write(trace.generate_chrome_trace_format())
Example #14
Source File: Trainer.py From PReMVOS with MIT License

def handle_run_metadata(self, metadata, step):
    if metadata is None:
        return
    if not self.profile:
        return
    self.summary_writer.add_run_metadata(metadata, "profile%d" % step, step)
    # leave a few steps for warmup and then write out at the 10th step
    if step == 10:
        from tensorflow.python.client import timeline
        tl = timeline.Timeline(metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open('timeline.json', 'w') as f:
            f.write(ctf)
Example #15
Source File: profile_session_run_hooks.py From aster with MIT License

def after_run(self, run_context, run_values):
    global_step = run_values.results - 1
    if self._do_profile:
        self._do_profile = False
        self._writer.add_run_metadata(run_values.run_metadata,
                                      'trace_{}'.format(global_step), global_step)
        timeline_object = timeline.Timeline(run_values.run_metadata.step_stats)
        chrome_trace = timeline_object.generate_chrome_trace_format()
        chrome_trace_save_path = 'timeline_{}.json'.format(global_step)
        with open(chrome_trace_save_path, 'w') as f:
            f.write(chrome_trace)
        logging.info('Profile trace saved to {}'.format(chrome_trace_save_path))
    if global_step == self._at_step:
        self._do_profile = True
Example #16
Source File: prof.py From tensorpack with Apache License 2.0

def _write_tracing(self, metadata):
    from tensorflow.python.client import timeline
    tl = timeline.Timeline(step_stats=metadata.step_stats)
    fname = os.path.join(
        self._dir, 'chrome-trace-{}.json'.format(self.global_step))
    with open(fname, 'w') as f:
        f.write(tl.generate_chrome_trace_format(
            show_dataflow=True, show_memory=True))
Example #17
Source File: Trainer.py From TrackR-CNN with MIT License

def _step(self, network, feed_dict, ops, summary_op, extraction_keys, step_number):
    if feed_dict is None:
        feed_dict = {}
    if summary_op is not None:
        ops["summaries"] = summary_op
    if self.collect_run_metadata:
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
    else:
        run_options = None
        run_metadata = None
    if len(extraction_keys) > 0:
        ops[Extractions.EXTRACTIONS] = [{k: [v] for k, v in extractions.items() if k in extraction_keys}
                                        for extractions in network.tower_extractions]
    res = self.session.run(ops, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)
    if "summaries" in res:
        summary_str = res["summaries"]
        del res["summaries"]
    else:
        summary_str = None
    if step_number is None:
        step_number = res["global_step"]
    if step_number % self.summary_interval == 0 and self.summary_writer is not None:
        # this is experimental, TODO: make this cleaner
        # the 50 is to allow for some warmup
        if self.collect_run_metadata and step_number > 50:
            self.summary_writer.add_run_metadata(run_metadata, tag="timing", global_step=step_number)
            fetched_timeline = timeline.Timeline(run_metadata.step_stats)
            chrome_trace = fetched_timeline.generate_chrome_trace_format()
            with open('timing.json', 'w') as f:
                f.write(chrome_trace)
        if summary_str is not None:
            self.summary_writer.add_summary(summary_str, global_step=step_number)
    res[Measures.MEASURES] = accumulate_measures({}, *res[Measures.MEASURES])
    if len(extraction_keys) > 0:
        res[Extractions.EXTRACTIONS] = accumulate_extractions({}, *res[Extractions.EXTRACTIONS])
    return res
Example #18
Source File: prof.py From ADL with MIT License

def _write_tracing(self, metadata):
    from tensorflow.python.client import timeline
    tl = timeline.Timeline(step_stats=metadata.step_stats)
    fname = os.path.join(
        self._dir, 'chrome-trace-{}.json'.format(self.global_step))
    with open(fname, 'w') as f:
        f.write(tl.generate_chrome_trace_format(
            show_dataflow=True, show_memory=True))
Example #19
Source File: tf_run_builder.py From ray with Apache License 2.0

def run_timeline(sess, ops, debug_name, feed_dict={}, timeline_dir=None):
    if timeline_dir:
        from tensorflow.python.client import timeline

        run_options = tf1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf1.RunMetadata()
        start = time.time()
        fetches = sess.run(
            ops,
            options=run_options,
            run_metadata=run_metadata,
            feed_dict=feed_dict)
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        global _count
        outf = os.path.join(
            timeline_dir, "timeline-{}-{}-{}.json".format(
                debug_name, os.getpid(), _count % 10))
        _count += 1
        trace_file = open(outf, "w")
        logger.info("Wrote tf timeline ({} s) to {}".format(
            time.time() - start, os.path.abspath(outf)))
        trace_file.write(trace.generate_chrome_trace_format())
    else:
        if log_once("tf_timeline"):
            logger.info(
                "Executing TF run without tracing. To dump TF timeline traces "
                "to disk, set the TF_TIMELINE_DIR environment variable.")
        fetches = sess.run(ops, feed_dict=feed_dict)
    return fetches
Example #20
Source File: tf_run_builder.py From ray with Apache License 2.0

def run_timeline(sess, ops, debug_name, feed_dict={}, timeline_dir=None):
    if timeline_dir:
        from tensorflow.python.client import timeline

        run_options = tf1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf1.RunMetadata()
        start = time.time()
        fetches = sess.run(
            ops,
            options=run_options,
            run_metadata=run_metadata,
            feed_dict=feed_dict)
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        global _count
        outf = os.path.join(
            timeline_dir, "timeline-{}-{}-{}.json".format(
                debug_name, os.getpid(), _count % 10))
        _count += 1
        trace_file = open(outf, "w")
        logger.info("Wrote tf timeline ({} s) to {}".format(
            time.time() - start, os.path.abspath(outf)))
        trace_file.write(trace.generate_chrome_trace_format())
    else:
        if log_once("tf_timeline"):
            logger.info(
                "Executing TF run without tracing. To dump TF timeline traces "
                "to disk, set the TF_TIMELINE_DIR environment variable.")
        fetches = sess.run(ops, feed_dict=feed_dict)
    return fetches
Example #21
Source File: basic_session_run_hooks.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License

def _save(self, step, save_path, step_stats):
    logging.info("Saving timeline for %d into '%s'.", step, save_path)
    with gfile.Open(save_path, "w") as f:
        trace = timeline.Timeline(step_stats)
        f.write(
            trace.generate_chrome_trace_format(
                show_dataflow=self._show_dataflow, show_memory=self._show_memory))
Example #22
Source File: profiler_hook.py From lambda-packs with MIT License

def _save(self, step, save_path, step_stats):
    logging.info("Saving timeline for %d into '%s'.", step, save_path)
    with gfile.Open(save_path, "w") as f:
        trace = timeline.Timeline(step_stats)
        f.write(trace.generate_chrome_trace_format(
            show_dataflow=self._show_dataflow, show_memory=self._show_memory))
Example #23
Source File: tensorflow_executor.py From rlgraph with Apache License 2.0

def update_timeline_if_necessary(self):
    """
    Writes a timeline json file according to specification.
    """
    if self.timeline_step % self.timeline_frequency == 0:
        fetched_timeline = timeline.Timeline(self.run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        with open("timeline_{:02d}.json".format(self.timeline_step), "w") as f:
            f.write(chrome_trace)
    self.timeline_step += 1
Example #24
Source File: helper.py From realtime_object_detection with MIT License

def write_timeline(self, step_stats, file_name):
    fetched_timeline = timeline.Timeline(step_stats)
    chrome_trace = fetched_timeline.generate_chrome_trace_format()
    with open(file_name, 'w') as f:
        f.write(chrome_trace)
Example #25
Source File: timeline_test.py From deep_image_model with Apache License 2.0

def testSimpleTimeline(self):
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    with tf.device('/cpu:0'):
        with tf.Session() as sess:
            sess.run(
                tf.constant(1.0),
                options=run_options,
                run_metadata=run_metadata)
    self.assertTrue(run_metadata.HasField('step_stats'))
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    self._validateTrace(ctf)
Example #26
Source File: prof.py From petridishnn with MIT License

def _write_tracing(self, metadata):
    tl = timeline.Timeline(step_stats=metadata.step_stats)
    fname = os.path.join(
        self._dir, 'chrome-trace-{}.json'.format(self.global_step))
    with open(fname, 'w') as f:
        f.write(tl.generate_chrome_trace_format(
            show_dataflow=True, show_memory=True))
Example #27
Source File: train.py From alternating-reader-tf with MIT License

def trace(config, sess, model, train_data):
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    X, Q, Y = random_batch(*train_data, config.batch_size)
    model.batch_fit(X, Q, Y, learning_rate, run_options, run_metadata)
    train_writer.add_run_metadata(run_metadata, 'step%d' % step)

    from tensorflow.python.client import timeline
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timeline.json', 'w') as f:
        f.write(ctf)
    return
Example #28
Source File: timeline_test.py From deep_image_model with Apache License 2.0

def testTimelineCpu(self):
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    with self.test_session(use_gpu=False) as sess:
        const1 = tf.constant(1.0, name='const1')
        const2 = tf.constant(2.0, name='const2')
        result = tf.add(const1, const2) + const1 * const2
        sess.run(result, options=run_options, run_metadata=run_metadata)
    self.assertTrue(run_metadata.HasField('step_stats'))
    step_stats = run_metadata.step_stats
    devices = [d.device for d in step_stats.dev_stats]
    self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format()
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_dataflow=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_memory=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_memory=False, show_dataflow=False)
    self._validateTrace(ctf)
Example #29
Source File: timeline_test.py From deep_image_model with Apache License 2.0

def testTimelineGpu(self):
    if not tf.test.is_gpu_available():
        return

    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    with self.test_session(force_gpu=True) as sess:
        const1 = tf.constant(1.0, name='const1')
        const2 = tf.constant(2.0, name='const2')
        result = tf.add(const1, const2) + const1 * const2
        sess.run(result, options=run_options, run_metadata=run_metadata)
    self.assertTrue(run_metadata.HasField('step_stats'))
    step_stats = run_metadata.step_stats
    devices = [d.device for d in step_stats.dev_stats]
    self.assertTrue('/job:localhost/replica:0/task:0/gpu:0' in devices)
    self.assertTrue('/gpu:0/stream:all' in devices)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format()
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_dataflow=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_memory=False)
    self._validateTrace(ctf)
    tl = timeline.Timeline(step_stats)
    ctf = tl.generate_chrome_trace_format(show_memory=False, show_dataflow=False)
    self._validateTrace(ctf)
Example #30
Source File: timeline_test.py From deep_image_model with Apache License 2.0

def testAnalysisAndAllocations(self):
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    config = tf.ConfigProto(device_count={'CPU': 3})

    with tf.Session(config=config) as sess:
        with tf.device('/cpu:0'):
            const1 = tf.constant(1.0, name='const1')
        with tf.device('/cpu:1'):
            const2 = tf.constant(2.0, name='const2')
        with tf.device('/cpu:2'):
            result = const1 + const2 + const1 * const2
        sess.run(result, options=run_options, run_metadata=run_metadata)
    self.assertTrue(run_metadata.HasField('step_stats'))
    tl = timeline.Timeline(run_metadata.step_stats)
    step_analysis = tl.analyze_step_stats()
    ctf = step_analysis.chrome_trace.format_to_string()
    self._validateTrace(ctf)
    maximums = step_analysis.allocator_maximums
    self.assertTrue('cpu' in maximums)
    cpu_max = maximums['cpu']
    # At least const1 + const2, both float32s (4 bytes each)
    self.assertGreater(cpu_max.num_bytes, 8)
    self.assertGreater(cpu_max.timestamp, 0)
    self.assertTrue('const1' in cpu_max.tensors)
    self.assertTrue('const2' in cpu_max.tensors)