Python tensorflow.global_norm() Examples
The following are 30 code examples of tensorflow.global_norm().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: model_deploy.py From edafa with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #2
Source File: model_deploy.py From CVTron with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #3
Source File: model_deploy.py From TwinGAN with Apache License 2.0 | 6 votes |
def add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(
        tf.summary.histogram('gradients/%s' % var.op.name, values))
    summaries.append(
        tf.summary.histogram('gradient_norms/%s' % var.op.name,
                             tf.global_norm([values])))
  return summaries
Example #4
Source File: model_deploy.py From ctw-baseline with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #5
Source File: model_deploy.py From STORK with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #6
Source File: ac_net.py From reinforcement_learning with MIT License | 6 votes |
def __init__(self, state_size, action_size, lr, name, n_h1=400, n_h2=300, global_name='global'):
    """Build the actor-critic network, its loss terms and gradients.

    Worker copies (name != global_name) additionally get an op that
    applies their local gradients to the global network's variables.
    """
    self.state_size = state_size
    self.action_size = action_size
    self.name = name
    self.n_h1 = n_h1
    self.n_h2 = n_h2
    self.optimizer = tf.train.AdamOptimizer(lr)
    (self.input_s, self.input_a, self.advantage, self.target_v,
     self.policy, self.value, self.action_est,
     self.model_variables) = self._build_network(name)
    # 0.5, 0.2, 1.0
    self.value_loss = 0.5 * tf.reduce_sum(
        tf.square(self.target_v - tf.reshape(self.value, [-1])))
    self.entropy_loss = 1.0 * tf.reduce_sum(self.policy * tf.log(self.policy))
    self.policy_loss = 1.0 * tf.reduce_sum(-tf.log(self.action_est) * self.advantage)
    self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.model_variables])
    # self.loss = 0.5 * self.value_loss + self.policy_loss + 0.2 * self.entropy_loss
    self.loss = self.value_loss + self.policy_loss + self.entropy_loss
    self.gradients = tf.gradients(self.loss, self.model_variables)
    if name != global_name:
        # Local gradients are applied to the global (shared) variables.
        self.var_norms = tf.global_norm(self.model_variables)
        master_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_name)
        self.apply_gradients = self.optimizer.apply_gradients(
            zip(self.gradients, master_vars))
Example #7
Source File: model_deploy.py From YOLO2TensorFlow with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #8
Source File: model_deploy.py From Cross-Modal-Projection-Learning with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #9
Source File: model_deploy.py From tensorflow-densenet with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #10
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def _update_value_step(self, observ, reward, length):
    """Compute the current value loss and perform a gradient update step.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    loss, summary = self._value_loss(observ, reward, length)
    grads, train_vars = zip(*self._value_optimizer.compute_gradients(loss))
    optimize = self._value_optimizer.apply_gradients(zip(grads, train_vars))
    summary = tf.summary.merge([
        summary,
        tf.summary.scalar('gradient_norm', tf.global_norm(grads)),
        utility.gradient_summaries(zip(grads, train_vars), dict(value=r'.*')),
    ])
    # Only expose loss/summary after the optimizer step has run.
    with tf.control_dependencies([optimize]):
        return [tf.identity(loss), tf.identity(summary)]
Example #11
Source File: model_deploy.py From DOTA_models with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #12
Source File: model_deploy.py From morph-net with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #13
Source File: model_deploy.py From tensorflow_yolo2 with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #14
Source File: model_deploy.py From MAX-Image-Segmenter with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #15
Source File: model_deploy.py From shuttleNet with GNU General Public License v3.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.  (This file uses the pre-1.0
    # tf.histogram_summary API.)
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.histogram_summary(var.op.name + ':gradient', values))
    summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #16
Source File: model_deploy.py From MobileNet with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #17
Source File: model_deploy.py From hops-tensorflow with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #18
Source File: optimize.py From UNMT-SPR with MIT License | 6 votes |
def create_train_op(loss, optimizer, global_step, params):
    """Build the training op with loss/gradient-norm summaries and
    optional global-norm gradient clipping."""
    with tf.name_scope("create_train_op"):
        grads_and_vars = optimizer.compute_gradients(
            loss, colocate_gradients_with_ops=True)
        grads = [gv[0] for gv in grads_and_vars]
        tvars = [gv[1] for gv in grads_and_vars]

        # Add summaries
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("global_norm/gradient_norm",
                          tf.global_norm(grads))

        # Gradient clipping: only when clip_grad_norm is a positive float.
        if (isinstance(params.clip_grad_norm or None, float)
                and params.clip_grad_norm > 0):
            grads, _ = tf.clip_by_global_norm(grads, params.clip_grad_norm)

        # Update variables
        train_op = optimizer.apply_gradients(
            list(zip(grads, tvars)), global_step)

        return loss, train_op
Example #19
Source File: model_deploy.py From garbage-object-detection-tensorflow with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #20
Source File: model_deploy.py From yolo_v2 with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #21
Source File: model_deploy.py From terngrad with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #22
Source File: training.py From ELMo_Chin with Apache License 2.0 | 6 votes |
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    """Wrapper around tf.clip_by_global_norm that also creates summary
    ops for the per-tensor norms before and after clipping.

    Returns (clipped_t_list, tf_norm, summary_ops).
    """
    # Use global_norm with one element to handle IndexedSlices vs dense.
    def _norm_summaries(prefix, tensors):
        ops = []
        for tensor, variable in zip(tensors, variables):
            tag = prefix + variable.name.replace(":", "_")
            ops.append(tf.summary.scalar(tag, tf.global_norm([tensor])))
        return ops

    summary_ops = _norm_summaries('norm_pre_clip/', t_list)
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
    summary_ops.extend(_norm_summaries('norm_post_clip/', clipped_t_list))
    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops
Example #23
Source File: model_deploy.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #24
Source File: model_deploy.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #25
Source File: model_deploy.py From CBAM-tensorflow-slim with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #26
Source File: training.py From bilm-tf with Apache License 2.0 | 6 votes |
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    """Wrapper around tf.clip_by_global_norm that also creates summary
    ops for the per-tensor norms before and after clipping.

    Returns (clipped_t_list, tf_norm, summary_ops).
    """
    # Use global_norm with one element to handle IndexedSlices vs dense.
    def _norm_summaries(prefix, tensors):
        ops = []
        for tensor, variable in zip(tensors, variables):
            tag = prefix + variable.name.replace(":", "_")
            ops.append(tf.summary.scalar(tag, tf.global_norm([tensor])))
        return ops

    summary_ops = _norm_summaries('norm_pre_clip/', t_list)
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
    summary_ops.extend(_norm_summaries('norm_post_clip/', clipped_t_list))
    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops
Example #27
Source File: model_deploy.py From Gun-Detector with Apache License 2.0 | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries
Example #28
Source File: training.py From nlp_research with MIT License | 6 votes |
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    """Wrapper around tf.clip_by_global_norm that also creates summary
    ops for the per-tensor norms before and after clipping.

    Returns (clipped_t_list, tf_norm, summary_ops).
    """
    # Use global_norm with one element to handle IndexedSlices vs dense.
    def _norm_summaries(prefix, tensors):
        ops = []
        for tensor, variable in zip(tensors, variables):
            tag = prefix + variable.name.replace(":", "_")
            ops.append(tf.summary.scalar(tag, tf.global_norm([tensor])))
        return ops

    summary_ops = _norm_summaries('norm_pre_clip/', t_list)
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
    summary_ops.extend(_norm_summaries('norm_post_clip/', clipped_t_list))
    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops
Example #29
Source File: training.py From embedding with MIT License | 6 votes |
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    """Wrapper around tf.clip_by_global_norm that also creates summary
    ops for the per-tensor norms before and after clipping.

    Returns (clipped_t_list, tf_norm, summary_ops).
    """
    # Use global_norm with one element to handle IndexedSlices vs dense.
    def _norm_summaries(prefix, tensors):
        ops = []
        for tensor, variable in zip(tensors, variables):
            tag = prefix + variable.name.replace(":", "_")
            ops.append(tf.summary.scalar(tag, tf.global_norm([tensor])))
        return ops

    summary_ops = _norm_summaries('norm_pre_clip/', t_list)
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)
    summary_ops.extend(_norm_summaries('norm_post_clip/', clipped_t_list))
    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops
Example #30
Source File: model_deploy.py From Creative-Adversarial-Networks with MIT License | 6 votes |
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is None:
      tf.logging.info('Var %s has no gradient', var.op.name)
      continue
    # IndexedSlices gradients carry their data in .values; unwrap before
    # feeding the histogram ops.
    values = grad.values if isinstance(grad, tf.IndexedSlices) else grad
    summaries.append(tf.summary.histogram(var.op.name + ':gradient', values))
    summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                          tf.global_norm([values])))
  return summaries