Python tensorflow.scalar_summary() Examples
The following are 30 code examples of tensorflow.scalar_summary(), drawn from open-source projects. The source file, originating project, and license are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
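Note that tf.scalar_summary() belongs to the pre-1.0 TensorFlow API: it was deprecated in TensorFlow 0.12 and removed in 1.0, where tf.summary.scalar() takes its place (a few of the examples below already use the newer spelling). Below is a minimal sketch of both spellings; the log directory is illustrative, not from any example.

import tensorflow as tf

loss = tf.constant(0.25, name='loss')

# TensorFlow <= 0.12:
# summary_op = tf.scalar_summary('loss', loss)

# TensorFlow >= 1.0 replacement:
summary_op = tf.summary.scalar('loss', loss)

with tf.Session() as sess:
    # '/tmp/scalar_summary_demo' is an illustrative path.
    writer = tf.summary.FileWriter('/tmp/scalar_summary_demo', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()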
Example #1
Source File: summary.py From VDAIC2017 with MIT License
def summary_moving_average():
    """ Create a MovingAverage op and summary for all variables in
    MOVING_SUMMARY_VARS_KEY.

    :returns: an op to maintain these averages.
    """
    with tf.name_scope('EMA_summary'):
        global_step_var = get_global_step_var()
        with tf.name_scope(None):
            averager = tf.train.ExponentialMovingAverage(
                0.99, num_updates=global_step_var, name='EMA')
        vars_to_summary = tf.get_collection(MOVING_SUMMARY_VARS_KEY)
        avg_maintain_op = averager.apply(vars_to_summary)
        for idx, c in enumerate(vars_to_summary):
            # TODO assert scalar
            name = re.sub('tower[p0-9]+/', '', c.op.name)
            tf.scalar_summary(name, averager.average(c))
        return avg_maintain_op
Example #2
Source File: neural_network.py From sentiment-analysis-tensorflow with Apache License 2.0
def variable_summaries(var, name):
    """
    Attach a lot of summaries to a Tensor for Tensorboard visualization.
    Ref: https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html

    :param var: Variable to summarize
    :param name: Summary name
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
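A hedged usage sketch for the helper above, assuming TensorFlow <= 0.12 and the variable_summaries() definition from this example; the layer name and shape are invented for illustration:

import tensorflow as tf

# Hypothetical weight matrix to monitor.
weights = tf.Variable(tf.truncated_normal([784, 256], stddev=0.1), name='weights')
variable_summaries(weights, 'hidden1/weights')

# Pre-1.0 counterpart of tf.summary.merge_all(); evaluate the merged op in a
# session and pass the resulting string to a SummaryWriter.
merged = tf.merge_all_summaries()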
Example #3
Source File: human_pose_nn.py From gait-recognition with BSD 3-Clause "New" or "Revised" License
def _init_summaries(self):
    if self.is_train:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

        self.summary_writer = tf.summary.FileWriter(logdir)
        self.summary_writer_by_points = [
            tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
            for i in range(16)]

        tf.scalar_summary('Average euclidean distance', self.euclidean_dist,
                          collections=[KEY_SUMMARIES])

        for i in range(16):
            tf.scalar_summary('Joint euclidean distance',
                              self.euclidean_dist_per_joint[i],
                              collections=[KEY_SUMMARIES_PER_JOINT[i]])

        self.create_summary_from_weights()

        self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
        self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i])
                                    for i in range(16)]
    else:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
        self.summary_writer = tf.summary.FileWriter(logdir)
Example #4
Source File: actor_learner.py From async-deep-rl with Apache License 2.0
def setup_summaries(self):
    episode_reward = tf.Variable(0.)
    s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
    if self.alg_type == "a3c":
        summary_vars = [episode_reward]
    else:
        episode_ave_max_q = tf.Variable(0.)
        s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
        logged_epsilon = tf.Variable(0.)
        s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
        summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
    update_ops = [summary_vars[i].assign(summary_placeholders[i])
                  for i in range(len(summary_vars))]
    with tf.control_dependencies(update_ops):
        summary_ops = tf.merge_all_summaries()
    return summary_placeholders, update_ops, summary_ops
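The placeholder-and-assign pattern above is a common way to push Python-side statistics (here, per-episode values) into TensorBoard. A hedged sketch of one way to drive the returned ops from a training loop; sess, writer, step, and the three values are illustrative, not from the project:

# One Python float per summary variable, in the order returned above.
stats = [episode_reward_value, max_q_value, epsilon_value]
feed = {ph: v for ph, v in zip(summary_placeholders, stats)}
# The control dependencies make the assigns run as part of the same step
# that produces the merged summary string.
summary_str = sess.run(summary_ops, feed_dict=feed)
writer.add_summary(summary_str, global_step=step)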
Example #5
Source File: cifar10.py From deep_image_model with Apache License 2.0
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #6
Source File: ranknet.py From tfranknet with GNU General Public License v2.0
def _setup_training(self):
    """
    Set up a data flow graph for fine tuning
    """
    layer_num = self.layer_num
    act_func = ACTIVATE_FUNC[self.activate_func]
    sigma = self.sigma
    lr = self.learning_rate
    weights = self.weights
    biases = self.biases
    data1, data2 = self.data1, self.data2
    batch_size = self.batch_size
    optimizer = OPTIMIZER[self.optimizer]
    with tf.name_scope("training"):
        s1 = self._obtain_score(data1, weights, biases, act_func, "1")
        s2 = self._obtain_score(data2, weights, biases, act_func, "2")
        with tf.name_scope("cost"):
            sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma * (s1 - s2))))
            self.cost = cost = sum_cost / batch_size
        self.optimize = optimizer(lr).minimize(cost)
    for n in range(layer_num - 1):
        tf.histogram_summary("weight" + str(n), weights[n])
        tf.histogram_summary("bias" + str(n), biases[n])
    tf.scalar_summary("cost", cost)
Example #7
Source File: cifar10.py From deep_image_model with Apache License 2.0
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
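In the original CIFAR-10 code this helper is wired into train() so that each training step also updates the loss averages; a hedged sketch of that wiring, with lr and global_step assumed to exist:

# Generate moving averages of all losses and associated summaries first,
# then make the optimizer depend on the averaging op.
loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)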
Example #8
Source File: kmeans.py From deep_image_model with Apache License 2.0
def _get_train_ops(self, features, _):
    (_, _, losses, training_op) = clustering_ops.KMeans(
        self._parse_tensor_or_dict(features),
        self._num_clusters,
        self._training_initial_clusters,
        self._distance_metric,
        self._use_mini_batch,
        random_seed=self._random_seed,
        kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
    ).training_graph()
    incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
    self._loss = tf.reduce_sum(losses)
    tf.scalar_summary('loss/raw', self._loss)
    training_op = with_dependencies([training_op, incr_step], self._loss)
    return training_op, self._loss
Example #9
Source File: LSPModels.py From deeppose with GNU General Public License v3.0
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % LSPGlobals.TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #10
Source File: LSPModels.py From deeppose with GNU General Public License v3.0
def _add_loss_summaries(total_loss):
    """Add summaries for losses in DeepPose model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
Example #11
Source File: statistic.py From gated-pixel-cnn with Apache License 2.0
def __init__(self, sess, data, runtime_base_dir, model_dir, variables, max_to_keep=20):
    self.sess = sess
    self.reset()

    with tf.variable_scope('t'):
        self.t_op = tf.Variable(0, trainable=False, name='t')
        self.t_add_op = self.t_op.assign_add(1)

    self.model_dir = os.path.join(runtime_base_dir, model_dir)
    self.saver = tf.train.Saver(variables + [self.t_op], max_to_keep=max_to_keep)
    self.writer = tf.train.SummaryWriter('%s/logs/%s' % (runtime_base_dir, model_dir),
                                         self.sess.graph)

    with tf.variable_scope('summary'):
        scalar_summary_tags = ['train_l', 'test_l']

        self.summary_placeholders = {}
        self.summary_ops = {}

        for tag in scalar_summary_tags:
            self.summary_placeholders[tag] = tf.placeholder('float32', None,
                                                            name=tag.replace(' ', '_'))
            self.summary_ops[tag] = tf.scalar_summary('%s/%s' % (data, tag),
                                                      self.summary_placeholders[tag])
Example #12
Source File: cifar10.py From ml with Apache License 2.0
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #13
Source File: cifar10.py From ml with Apache License 2.0
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #14
Source File: deepaudio.py From iLID with MIT License
def accuracy(logits, dense_labels):
    seen_german = tf.Variable(0, trainable=False)
    seen_english = tf.Variable(0, trainable=False)

    x = tf.nn.softmax(logits)
    correct_pred = tf.equal(tf.argmax(x, 1), tf.argmax(dense_labels, 1))

    german_samples = tf.equal(tf.constant(1, dtype="int64"), tf.argmax(dense_labels, 1))
    german_accuracy = tf.reduce_mean(
        tf.cast(tf.gather(correct_pred, tf.where(german_samples)), tf.float32))
    sum_german_samples = seen_german.assign_add(
        tf.reduce_sum(tf.cast(tf.gather(dense_labels, tf.where(german_samples)), tf.int32)))
    tf.scalar_summary("german_accuracy", german_accuracy)

    english_samples = tf.equal(tf.constant(0, dtype="int64"), tf.argmax(dense_labels, 1))
    english_accuracy = tf.reduce_mean(
        tf.cast(tf.gather(correct_pred, tf.where(english_samples)), tf.float32))
    sum_english_samples = seen_english.assign_add(
        tf.reduce_sum(tf.cast(tf.gather(dense_labels, tf.where(english_samples)), tf.int32)))
    tf.scalar_summary("english_accuracy", english_accuracy)

    german_predictions = tf.equal(tf.constant(1, dtype="int64"), tf.argmax(x, 1))
    german_predictions_count = tf.reduce_sum(tf.cast(german_predictions, tf.int32))

    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.scalar_summary("accuracy", accuracy)

    return (accuracy, english_accuracy, german_accuracy,
            german_predictions_count, sum_english_samples, sum_german_samples)
Example #15
Source File: trainer.py From StackGAN with MIT License
def define_summaries(self):
    '''Helper function for init_opt'''
    all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
    for k, v in self.log_vars:
        if k.startswith('g'):
            all_sum['g'].append(tf.scalar_summary(k, v))
        elif k.startswith('d'):
            all_sum['d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_g'):
            all_sum['hr_g'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_d'):
            all_sum['hr_d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hist'):
            all_sum['hist'].append(tf.histogram_summary(k, v))

    self.g_sum = tf.merge_summary(all_sum['g'])
    self.d_sum = tf.merge_summary(all_sum['d'])
    self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
    self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
    self.hist_sum = tf.merge_summary(all_sum['hist'])
Example #16
Source File: model.py From web_page_classification with MIT License
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    # Error: these summaries cause high classifier error!!!
    # All inputs to node MergeSummary/MergeSummary must be from the same frame.
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #17
Source File: selector.py From dynamic-coattention-network with MIT License
def _add_train_op(self):
    params = self._params

    self._lr_rate = tf.maximum(
        params.min_lr,
        tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    # use reserved gpu for gradient computation
    with tf.device(self._get_gpu(self._num_gpus - 1)):
        grads, global_norm = tf.clip_by_global_norm(
            tf.gradients(self._loss, tvars), params.max_grad_norm)
    tf.scalar_summary('global_norm', global_norm)
    optimizer = tf.train.AdamOptimizer(self._lr_rate)
    tf.scalar_summary('learning rate', self._lr_rate)
    with tf.device(self._next_device()):
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars), global_step=self._global_step, name='train_step')
    self._summaries = tf.merge_all_summaries()
    return self._train_op, self._loss
Example #18
Source File: deepaudio.py From iLID with MIT License
def _add_loss_summaries(total_loss):
    """Add summaries for losses in model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
Example #19
Source File: summary.py From VDAIC2017 with MIT License
def add_activation_summary(x, name=None):
    """
    Add summary to graph for an activation tensor x.
    If name is None, use x.name.
    """
    ndim = x.get_shape().ndims
    assert ndim >= 2, \
        "Summary a scalar with histogram? Maybe use scalar instead. FIXME!"
    if name is None:
        name = x.name
    with tf.name_scope('act_summary'):
        tf.histogram_summary(name + '/activation', x)
        tf.scalar_summary(name + '/activation_sparsity', tf.nn.zero_fraction(x))
        tf.scalar_summary(name + '/activation_rms', rms(x))
Example #20
Source File: network.py From iLID with MIT License
def set_activation_summary(self):
    '''Log each layer's activations and sparsity.'''
    tf.image_summary("input images", self.input_layer.output, max_images=100)

    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    for layer in self.hidden_layers:
        tf.histogram_summary(layer.name + '/activations', layer.output)
        tf.scalar_summary(layer.name + '/sparsity', tf.nn.zero_fraction(layer.output))
Example #21
Source File: item2vec.py From board-yet with GNU General Public License v3.0
def init_graphs(self):
    # TODO(carson): Get variables representing batches and labels
    self.batch = tf.placeholder(dtype=tf.int32, shape=[None, self.batch_size])
    self.labels = tf.placeholder(dtype=tf.int32, shape=[None, self.batch_size])
    batch_logits, neg_logits = self.build_training_graph(self.batch, self.labels)
    self.loss = self.loss_function(batch_logits, neg_logits)
    tf.scalar_summary("NCE loss", self.loss)
    self.train_node = self.build_optimize_graph(self.loss)
    tf.initialize_all_variables().run()
    self.saver = tf.train.Saver()
Example #22
Source File: net_utils.py From scene-graph-TF-release with MIT License
def exp_average_summary(ops, dep_ops, decay=0.9, name='avg',
                        scope_pfix='', raw_pfix=' (raw)', avg_pfix=' (avg)'):
    averages = tf.train.ExponentialMovingAverage(decay, name=name)
    averages_op = averages.apply(ops)

    for op in ops:
        tf.scalar_summary(scope_pfix + op.name + raw_pfix, op)
        tf.scalar_summary(scope_pfix + op.name + avg_pfix, averages.average(op))

    with tf.control_dependencies([averages_op]):
        for i, dep_op in enumerate(dep_ops):
            dep_ops[i] = tf.identity(dep_op, name=dep_op.name.split(':')[0])

    return dep_ops
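A hypothetical call to the helper above (the names are invented, not from the project): summarize the raw and averaged loss, and rewrap the training op so the averaging runs on every step:

# exp_average_summary() returns dep_ops wrapped in tf.identity under a
# control dependency on the averaging op, so fetching train_op now also
# updates the moving averages.
loss, train_op = exp_average_summary([loss], [loss, train_op],
                                     decay=0.9, name='avg_loss')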
Example #23
Source File: network.py From scene-graph-TF-release with MIT License
def _variable_summaries(self, var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        tf.histogram_summary(name, var)
Example #24
Source File: model.py From web_page_classification with MIT License
def _add_loss_summaries(self, total_loss):
    """Add summaries for losses in CNN model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # # Compute the moving average of all individual losses and the total loss.
    # loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    # losses = tf.get_collection('losses')
    # loss_averages_op = loss_averages.apply(losses + [total_loss])
    #
    # # Attach a scalar summary to all individual losses and the total loss; do
    # # the same for the averaged version of the losses.
    # for l in losses + [total_loss]:
    #     # Name each loss as '(raw)' and name the moving average version of
    #     # the loss as the original loss name.
    #     tf.scalar_summary(l.op.name + ' (raw)', l)
    #     tf.scalar_summary(l.op.name, loss_averages.average(l))

    # losses = tf.get_collection('REGULARIZATION_LOSSES')
    # all_losses = losses + [total_loss]
    all_losses = [total_loss]
    # is it necessary to add all REGULARIZATION_LOSSES ?????
    for l in all_losses:
        tf.scalar_summary(l.op.name, l)
Example #25
Source File: opt.py From phillip with GNU General Public License v3.0
def optimize(self, loss, params=None, predictions=None, metric=None):
    grads_vars = self.optimizer.compute_gradients(loss, var_list=params)
    grads, params = map(list, zip(*grads_vars))
    if self.natural:
        grads = self.natgrad(params, grads, predictions, metric)

    grads = [tf.check_numerics(g, "NaN gradient in param %d" % i)
             for i, g in enumerate(grads)]

    flat_params, flat_grads = [
        tf.abs(tf.concat(axis=0, values=[tf.reshape(t, [-1]) for t in ts]))
        for ts in (params, grads)]
    # flat_ratios = flat_grads / flat_params
    # tf.scalar_summary('grad_param_max', tf.reduce_max(flat_ratios))
    # tf.scalar_summary('grad_param_avg', tf.reduce_mean(flat_ratios))

    grad_max = tf.reduce_max(flat_grads)
    tf.summary.scalar('grad_max', grad_max)
    tf.summary.scalar('grad_avg', tf.reduce_mean(flat_grads))

    if self.clip:
        clip = tf.minimum(self.clip, grad_max) / grad_max
        grads = [g * clip for g in grads]

    return self.optimizer.apply_gradients(zip(grads, params))
Example #26
Source File: neural_network.py From sentiment-analysis-tensorflow with Apache License 2.0
def __loss(self, losses):
    """
    :param losses: Cross entropy losses with shape [batch_size]
    :return: Cross entropy loss mean
    """
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(losses, name='loss')
        tf.scalar_summary('loss', loss)
    return loss
Example #27
Source File: kaggle_mnist_alexnet_model.py From tensorflow-alexnet with MIT License
def accuracy(logits, labels):
    # accuracy
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)), tf.float32))
        tf.scalar_summary('accuracy', accuracy)
    return accuracy
Example #28
Source File: network.py From iLID with MIT License
def set_accuracy(self):
    correct_pred = tf.equal(tf.argmax(self.layers.output, 1), tf.argmax(self.y, 1))
    self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.scalar_summary("accuracy", self.accuracy)
Example #29
Source File: deepaudio.py From iLID with MIT License
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    tensor_name = x.op.name
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
Example #30
Source File: network.py From iLID with MIT License
def set_optimizer(self, learning_rate, decay_steps, optimizer=tf.train.AdamOptimizer):
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(learning_rate, global_step, decay_steps,
                                    0.1, staircase=True)
    tf.scalar_summary("learning_rate", lr)
    self.optimizer = optimizer(learning_rate=lr).minimize(self.cost,
                                                          global_step=global_step)