Python tensorflow.Print() Examples
The following are 30 code examples of tensorflow.Print(). You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the tensorflow module.
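tf.Print is an identity op with a printing side effect: it returns a tensor equal to its first argument and writes the tensors in its data list to standard error each time the op executes, so the returned value must actually be consumed downstream or nothing is ever printed. Note that tf.Print was deprecated in TensorFlow 1.x in favor of tf.print, and in TensorFlow 2.x it is only available as tf.compat.v1.Print. A minimal sketch of the basic pattern, assuming a TF 1.x graph-mode session:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
# Wrap x: the returned tensor carries the same values, plus a print side effect.
x = tf.Print(x, [tf.reduce_mean(x), tf.shape(x)], message="mean and shape: ")
y = x * 2.0  # downstream op that consumes the wrapped tensor

with tf.Session() as sess:
    sess.run(y)  # computing y runs the Print op and logs the values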
Example #1
Source File: tfops.py From glow with MIT License
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)),
             tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "[" + _str + "] " + x.name)

# Allreduce methods
Example #2
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3,
                                     fancy_upscaling=False,
                                     dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        return image
Example #3
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3,
                                     fancy_upscaling=False,
                                     dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        return image
Example #4
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
        #                            fancy_upscaling=False,
        #                            dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
Example #5
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
        #                            fancy_upscaling=False,
        #                            dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
Example #6
Source File: image_preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3,
                                     fancy_upscaling=False,
                                     dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        return image
Example #7
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3,
                                     fancy_upscaling=False,
                                     dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        return image
Example #8
Source File: kfac_utils.py From stable-baselines with MIT License
def detect_min_val(input_mat, var, threshold=1e-6, name='', debug=False):
    """
    If debug is not set, will run clipout_neg. Else, will clip and print out odd eigen values

    :param input_mat: (TensorFlow Tensor)
    :param var: (TensorFlow Tensor) variable
    :param threshold: (float) the cutoff threshold
    :param name: (str) the name of the variable
    :param debug: (bool) debug function
    :return: (TensorFlow Tensor) clipped tensor
    """
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipout_neg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped,
                             [tf.convert_to_tensor('odd ratio ' + name + ' eigen values!!!'),
                              tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped
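Here the tf.Print call sits inside a tf.cond branch. Because tf.cond takes callables, the print op only runs when its branch is actually selected at run time, which makes it a cheap conditional warning. A self-contained sketch of that pattern (the input values and message below are illustrative, not from the project):

import tensorflow as tf

x = tf.constant([-2.0, 0.5, 3.0])
ratio = tf.reduce_max(x) / tf.reduce_min(x)  # negative here, so the false branch runs

# The false branch wraps x in tf.Print, so the warning is emitted only
# when tf.cond takes that branch during execution.
checked = tf.cond(tf.greater(ratio, 0.),
                  lambda: x,
                  lambda: tf.Print(x, [ratio], message='suspicious ratio: '))

with tf.Session() as sess:
    sess.run(checked)  # logs the ratio, since the condition is False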
Example #9
Source File: util.py From bnn with MIT License
def pil_image_to_tf_summary(img, tag="debug_img"):
    # serialise png bytes
    sio = io.BytesIO()
    img.save(sio, format="png")
    png_bytes = sio.getvalue()

    # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto
    # Note: PIL's Image.size is (width, height); the original passed img.size[0]
    # as the height, which swaps the two for non-square images.
    return tf.Summary(value=[tf.Summary.Value(
        tag=tag,
        image=tf.Summary.Image(height=img.size[1], width=img.size[0],
                               colorspace=3,  # RGB
                               encoded_image_string=png_bytes))])

# def dice_loss(y, y_hat, batch_size, smoothing=0):
#     y = tf.reshape(y, (batch_size, -1))
#     y_hat = tf.reshape(y_hat, (batch_size, -1))
#     intersection = y * y_hat
#     intersection_rs = tf.reduce_sum(intersection, axis=1)
#     nom = intersection_rs + smoothing
#     denom = tf.reduce_sum(y, axis=1) + tf.reduce_sum(y_hat, axis=1) + smoothing
#     score = 2.0 * (nom / denom)
#     loss = 1.0 - score
#     loss = tf.Print(loss, [intersection, intersection_rs, nom, denom], first_n=100, summarize=10000)
#     return loss
Example #10
Source File: TestUpd.py From How-to-Learn-from-Little-Data with MIT License
def omniglot():

    sess = tf.InteractiveSession()

    """ def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index (:, dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)

        def body(_, elems):
            # The original (Python 2) signature used tuple parameter unpacking,
            # `def body(_, (v, d2, chg)):`, which is a syntax error in Python 3.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # tf.concat_v2 was renamed tf.concat in TensorFlow 1.0.
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])

        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Example #11
Source File: algorithm.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _update_value(self, observ, reward, length):
    """Perform multiple update steps of the value baseline.

    We need to decide for the summary of one iteration, and thus choose the one
    after half of the iterations.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('update_value'):
        loss, summary = tf.scan(
            lambda _1, _2: self._update_value_step(observ, reward, length),
            tf.range(self._config.update_epochs_value),
            [0., ''], parallel_iterations=1)
        print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'value loss: ')
        with tf.control_dependencies([loss, print_loss]):
            return summary[self._config.update_epochs_value // 2]
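Unlike the wrapping pattern of the earlier examples, Example #11 builds a standalone print op, tf.Print(0, ...), whose result is never fed into the model; it is forced to run through tf.control_dependencies instead. A minimal self-contained sketch of the same idea (the loss below is a stand-in tensor, not the project's value baseline):

import tensorflow as tf

loss = tf.reduce_mean(tf.square(tf.random_normal([16])))  # stand-in loss
print_loss = tf.Print(0, [loss], message='value loss: ')  # runs only if forced

with tf.control_dependencies([print_loss]):
    # Ops created inside this block depend on the print op, so fetching
    # `result` triggers the logging as a side effect.
    result = tf.identity(loss)

with tf.Session() as sess:
    sess.run(result)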
Example #12
Source File: losses.py From DOTA_models with Apache License 2.0
def log_quaternion_loss(predictions, labels, params):
    """A helper function to compute the mean error between batches of quaternions.

    The caller is expected to add the loss to the graph.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expecting 'use_logging', 'batch_size'.

    Returns:
      A Tensor of size 1, denoting the mean error between batches of quaternions.
    """
    use_logging = params['use_logging']
    logcost = log_quaternion_loss_batch(predictions, labels, params)
    logcost = tf.reduce_sum(logcost, [0])
    batch_size = params['batch_size']
    logcost = tf.multiply(logcost, 1.0 / batch_size, name='log_quaternion_loss')
    if use_logging:
        logcost = tf.Print(
            logcost, [logcost], '[logcost]', name='log_quaternion_loss_print')
    return logcost
Example #13
Source File: preprocessing.py From models with Apache License 2.0
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
    """Decode a JPEG string into one 3-D float image Tensor.

    Args:
      image_buffer: scalar string Tensor.
      scope: Optional scope for op_scope.

    Returns:
      3-D float Tensor with values ranging from [0, 1).
    """
    # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
    # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
    with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
        # Decode the string as an RGB JPEG.
        # Note that the resulting image contains an unknown height and width
        # that is set dynamically by decode_jpeg. In other words, the height
        # and width of image is unknown at compile-time.
        image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
        #                            fancy_upscaling=False,
        #                            dct_method='INTEGER_FAST')

        # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        return image
Example #14
Source File: kfac.py From stable-baselines with MIT License
def apply_stats_eigen(self, eigen_list):
    """
    apply the update using the eigen values of the stats

    :param eigen_list: ([TensorFlow Tensor]) The list of eigen values of the stats
    :return: ([TensorFlow Tensor]) update operations
    """
    update_ops = []
    if self.verbose > 1:
        print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for _, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        update_ops.append(tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(update_ops):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        update_ops.append(factor_step_op)
        if KFAC_DEBUG:
            update_ops.append(tf.Print(tf.constant(0.),
                                       [tf.convert_to_tensor('updated kfac factors')]))
    return update_ops
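Several of the KFAC examples in this list share an idiom: when the KFAC_DEBUG flag is set, a tf.Print wrapped around a throwaway tf.constant(0.) is appended to the list of update ops, so running the group emits a log marker without altering any real tensor. A minimal sketch of the idiom, with a hypothetical DEBUG flag standing in for KFAC_DEBUG:

import tensorflow as tf

DEBUG = True  # hypothetical flag standing in for KFAC_DEBUG
counter = tf.Variable(0, dtype=tf.int32)

update_ops = [tf.assign_add(counter, 1)]
if DEBUG:
    # The printed tensor (a dummy 0.) is irrelevant; only the side effect matters.
    update_ops.append(tf.Print(tf.constant(0.),
                               [tf.convert_to_tensor('updated factors')]))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_ops)  # applies the update and logs the marker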
Example #15
Source File: TestUpd.py From NTM-One-Shot-TF with MIT License
def omniglot():

    sess = tf.InteractiveSession()

    """ def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index (:, dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)

        def body(_, elems):
            # The original (Python 2) signature used tuple parameter unpacking,
            # `def body(_, (v, d2, chg)):`, which is a syntax error in Python 3.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # tf.concat_v2 was renamed tf.concat in TensorFlow 1.0.
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])

        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Example #16
Source File: kfac.py From DRL_DeliveryDuel with MIT License
def apply_gradients(self, grads):
    coldOptim = tf.train.MomentumOptimizer(self._cold_lr, self._momentum)

    def coldSGDstart():
        sgd_grads, sgd_var = zip(*grads)

        if self.max_grad_norm is not None:
            sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)

        sgd_grads = list(zip(sgd_grads, sgd_var))

        sgd_step_op = tf.assign_add(self.sgd_step, 1)
        coldOptim_op = coldOptim.apply_gradients(sgd_grads)
        if KFAC_DEBUG:
            with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                sgd_step_op = tf.Print(
                    sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
        return tf.group(*[sgd_step_op, coldOptim_op])

    kfacOptim_op, qr = self.apply_gradients_kfac(grads)

    def warmKFACstart():
        return kfacOptim_op

    return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
Example #17
Source File: kfac.py From ICML2019-TREX with MIT License
def applyStatsEigen(self, eigen_list):
    updateOps = []
    print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        updateOps.append(tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(updateOps):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        updateOps.append(factor_step_op)
        if KFAC_DEBUG:
            updateOps.append(tf.Print(tf.constant(0.),
                                      [tf.convert_to_tensor('updated kfac factors')]))
    return updateOps
Example #18
Source File: kfac.py From ICML2019-TREX with MIT License
def apply_gradients(self, grads):
    coldOptim = tf.train.MomentumOptimizer(self._cold_lr, self._momentum)

    def coldSGDstart():
        sgd_grads, sgd_var = zip(*grads)

        if self.max_grad_norm is not None:
            sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)

        sgd_grads = list(zip(sgd_grads, sgd_var))

        sgd_step_op = tf.assign_add(self.sgd_step, 1)
        coldOptim_op = coldOptim.apply_gradients(sgd_grads)
        if KFAC_DEBUG:
            with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                sgd_step_op = tf.Print(
                    sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
        return tf.group(*[sgd_step_op, coldOptim_op])

    kfacOptim_op, qr = self.apply_gradients_kfac(grads)

    def warmKFACstart():
        return kfacOptim_op

    return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
Example #19
Source File: kfac_utils.py From ICML2019-TREX with MIT License
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped,
                             [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'),
                              tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped
Example #20
Source File: kfac.py From ICML2019-TREX with MIT License
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
    updateOps = []
    # obtain the stats var list
    for stats_var in statsUpdates:
        stats_new = statsUpdates[stats_var]
        if accumulate:
            # simple superbatch averaging
            update_op = tf.assign_add(
                stats_var, accumulateCoeff * stats_new, use_locking=True)
        else:
            # exponential running averaging
            update_op = tf.assign(
                stats_var, stats_var * self._stats_decay, use_locking=True)
            update_op = tf.assign_add(
                update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
        updateOps.append(update_op)

    with tf.control_dependencies(updateOps):
        stats_step_op = tf.assign_add(self.stats_step, 1)

        if KFAC_DEBUG:
            stats_step_op = (tf.Print(stats_step_op,
                                      [tf.convert_to_tensor('step:'),
                                       self.global_step,
                                       tf.convert_to_tensor('fac step:'),
                                       self.factor_step,
                                       tf.convert_to_tensor('sgd step:'),
                                       self.sgd_step,
                                       tf.convert_to_tensor('Accum:'),
                                       tf.convert_to_tensor(accumulate),
                                       tf.convert_to_tensor('Accum coeff:'),
                                       tf.convert_to_tensor(accumulateCoeff),
                                       tf.convert_to_tensor('stat step:'),
                                       self.stats_step, updateOps[0], updateOps[1]]))

    return [stats_step_op, ]
Example #21
Source File: kfac.py From DRL_DeliveryDuel with MIT License
def applyStatsEigen(self, eigen_list):
    updateOps = []
    print(('updating %d eigenvalue/vectors' % len(eigen_list)))
    for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
        stats_eigen_var = self.eigen_reverse_lookup[mark]
        updateOps.append(tf.assign(stats_eigen_var, tensor, use_locking=True))

    with tf.control_dependencies(updateOps):
        factor_step_op = tf.assign_add(self.factor_step, 1)
        updateOps.append(factor_step_op)
        if KFAC_DEBUG:
            updateOps.append(tf.Print(tf.constant(0.),
                                      [tf.convert_to_tensor('updated kfac factors')]))
    return updateOps
Example #22
Source File: fcn8_vgg.py From MachineLearning with Apache License 2.0
def _upscore_layer(self, bottom, shape, num_classes, name, debug, ksize=4, stride=2):
    strides = [1, stride, stride, 1]
    with tf.variable_scope(name):
        in_features = bottom.get_shape()[3].value

        if shape is None:
            # Compute shape out of Bottom
            in_shape = tf.shape(bottom)

            h = ((in_shape[1] - 1) * stride) + 1
            w = ((in_shape[2] - 1) * stride) + 1
            new_shape = [in_shape[0], h, w, num_classes]
        else:
            new_shape = [shape[0], shape[1], shape[2], num_classes]
        output_shape = tf.stack(new_shape)

        logging.debug("Layer: %s, Fan-in: %d" % (name, in_features))
        f_shape = [ksize, ksize, num_classes, in_features]

        # create
        num_input = ksize * ksize * in_features / stride
        stddev = (2 / num_input) ** 0.5

        weights = self.get_deconv_filter(f_shape)
        self._add_wd_and_summary(weights, self.wd, "fc_wlosses")
        deconv = tf.nn.conv2d_transpose(bottom, weights, output_shape,
                                        strides=strides, padding='SAME')

        if debug:
            deconv = tf.Print(deconv, [tf.shape(deconv)],
                              message='Shape of %s' % name,
                              summarize=4, first_n=1)

    _activation_summary(deconv)
    return deconv
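The debug branch above leans on two tf.Print keyword arguments: summarize caps how many entries of each tensor are printed (the default is 3, which would truncate a 4-element shape vector), and first_n silences the op after it has executed that many times. A short sketch, independent of the FCN code above:

import tensorflow as tf

deconv = tf.zeros([1, 28, 28, 21])  # stand-in for the upsampled feature map
# Print all 4 entries of the shape vector (summarize=4), but only on the
# first execution of the op (first_n=1), so training logs stay readable.
deconv = tf.Print(deconv, [tf.shape(deconv)],
                  message='Shape of upscore: ', summarize=4, first_n=1)

with tf.Session() as sess:
    sess.run(deconv)  # logs once
    sess.run(deconv)  # silent: first_n is exhausted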
Example #23
Source File: glow.py From BERT with Apache License 2.0
def body(self, features):
    exp_coupling = ["affine", "additive"]
    if self.hparams.coupling not in exp_coupling:
        raise ValueError("Expected hparams.coupling to be in %s, got %s" %
                         (exp_coupling, self.hparams.coupling))
    if self.is_training:
        init_features = self.create_init_batch(features)
        init_op = self.objective_tower(init_features, init=True)
        init_op = tf.Print(
            init_op, [init_op], message="Triggering data-dependent init.", first_n=20)
        tf.add_to_collection("glow_init_op", init_op)
    train_op = self.objective_tower(features, init=False)
    return tf.zeros_like(features["targets"]), {"training": train_op}
Example #24
Source File: kfac.py From stable-baselines with MIT License
def apply_gradients(self, grads):
    """
    apply the gradient

    :param grads: ([TensorFlow Tensor]) the gradient
    :return: (function, QueueRunner) train operation, queue operation runner
    """
    cold_optim = tf.train.MomentumOptimizer(self._cold_lr, self._momentum)

    def _cold_sgd_start():
        sgd_grads, sgd_var = zip(*grads)

        if self.max_grad_norm is not None:
            sgd_grads, _ = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)

        sgd_grads = list(zip(sgd_grads, sgd_var))

        sgd_step_op = tf.assign_add(self.sgd_step, 1)
        cold_optim_op = cold_optim.apply_gradients(sgd_grads)
        if KFAC_DEBUG:
            with tf.control_dependencies([sgd_step_op, cold_optim_op]):
                sgd_step_op = tf.Print(
                    sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
        return tf.group(*[sgd_step_op, cold_optim_op])

    # remove unused variables
    grads = [(grad, var) for (grad, var) in grads if grad is not None]

    kfac_optim_op, queue_runner = self.apply_gradients_kfac(grads)

    def _warm_kfac_start():
        return kfac_optim_op

    return tf.cond(tf.greater(self.sgd_step, self._cold_iter), _warm_kfac_start, _cold_sgd_start), queue_runner
Example #25
Source File: kfac.py From stable-baselines with MIT License
def compute_stats_eigen(self):
    """
    compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue

    :return: ([TensorFlow Tensor]) update operations
    """
    # TODO: figure out why this op has delays (possibly moving eigenvectors around?)
    with tf.device('/cpu:0'):
        stats_eigen = self.stats_eigen
        computed_eigen = {}
        eigen_reverse_lookup = {}
        update_ops = []
        # sync copied stats
        with tf.control_dependencies([]):
            for stats_var in stats_eigen:
                if stats_var not in computed_eigen:
                    eigen_decomposition = tf.self_adjoint_eig(stats_var)
                    eigen_values = eigen_decomposition[0]
                    eigen_vectors = eigen_decomposition[1]
                    if self._use_float64:
                        eigen_values = tf.cast(eigen_values, tf.float64)
                        eigen_vectors = tf.cast(eigen_vectors, tf.float64)
                    update_ops.append(eigen_values)
                    update_ops.append(eigen_vectors)
                    computed_eigen[stats_var] = {'e': eigen_values, 'Q': eigen_vectors}
                    eigen_reverse_lookup[eigen_values] = stats_eigen[stats_var]['e']
                    eigen_reverse_lookup[eigen_vectors] = stats_eigen[stats_var]['Q']

        self.eigen_reverse_lookup = eigen_reverse_lookup
        self.eigen_update_list = update_ops

        if KFAC_DEBUG:
            self.eigen_update_list = [item for item in update_ops]
            with tf.control_dependencies(update_ops):
                update_ops.append(tf.Print(tf.constant(0.),
                                           [tf.convert_to_tensor('computed factor eigen')]))

    return update_ops
Example #26
Source File: kfac.py From stable-baselines with MIT License
def _apply_stats(self, stats_updates, accumulate=False, accumulate_coeff=0.):
    update_ops = []
    # obtain the stats var list
    for stats_var in stats_updates:
        stats_new = stats_updates[stats_var]
        if accumulate:
            # simple superbatch averaging
            update_op = tf.assign_add(
                stats_var, accumulate_coeff * stats_new, use_locking=True)
        else:
            # exponential running averaging
            update_op = tf.assign(
                stats_var, stats_var * self._stats_decay, use_locking=True)
            update_op = tf.assign_add(
                update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
        update_ops.append(update_op)

    with tf.control_dependencies(update_ops):
        stats_step_op = tf.assign_add(self.stats_step, 1)

        if KFAC_DEBUG:
            stats_step_op = (tf.Print(stats_step_op,
                                      [tf.convert_to_tensor('step:'),
                                       self.global_step,
                                       tf.convert_to_tensor('fac step:'),
                                       self.factor_step,
                                       tf.convert_to_tensor('sgd step:'),
                                       self.sgd_step,
                                       tf.convert_to_tensor('Accum:'),
                                       tf.convert_to_tensor(accumulate),
                                       tf.convert_to_tensor('Accum coeff:'),
                                       tf.convert_to_tensor(accumulate_coeff),
                                       tf.convert_to_tensor('stat step:'),
                                       self.stats_step, update_ops[0], update_ops[1]]))

    return [stats_step_op, ]
Example #27
Source File: prioritized_replay.py From rlgraph with Apache License 2.0
def _graph_fn_get_records(self, num_records=1):
    # Sum total mass.
    current_size = self.read_variable(self.size)
    stored_elements_prob_sum = self.sum_segment_tree.reduce(start=0, limit=current_size - 1)

    # Sample the entire batch.
    sample = stored_elements_prob_sum * tf.random_uniform(shape=(num_records, ))

    # Sample by looking up prefix sum.
    sample_indices = tf.map_fn(fn=self.sum_segment_tree.index_of_prefixsum, elems=sample, dtype=tf.int32)
    # sample_indices = self.sum_segment_tree.index_of_prefixsum(sample)

    # Importance correction.
    total_prob = self.sum_segment_tree.reduce(start=0, limit=self.priority_capacity - 1)
    min_prob = self.min_segment_tree.get_min_value() / total_prob
    max_weight = tf.pow(x=min_prob * tf.cast(current_size, tf.float32), y=-self.beta)

    def importance_sampling_fn(sample_index):
        sample_prob = self.sum_segment_tree.get(sample_index) / stored_elements_prob_sum
        weight = tf.pow(x=sample_prob * tf.cast(current_size, tf.float32), y=-self.beta)
        return weight / max_weight

    corrected_weights = tf.map_fn(
        fn=importance_sampling_fn,
        elems=sample_indices,
        dtype=tf.float32
    )
    # sample_indices = tf.Print(sample_indices, [sample_indices, self.sum_segment_tree.values], summarize=1000,
    #                           message='sample indices, segment tree values = ')
    return self._read_records(indices=sample_indices), sample_indices, corrected_weights
Example #28
Source File: kfac.py From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def apply_gradients(self, grads):
    coldOptim = tf.train.MomentumOptimizer(self._cold_lr, self._momentum)

    def coldSGDstart():
        sgd_grads, sgd_var = zip(*grads)

        if self.max_grad_norm is not None:
            sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)

        sgd_grads = list(zip(sgd_grads, sgd_var))

        sgd_step_op = tf.assign_add(self.sgd_step, 1)
        coldOptim_op = coldOptim.apply_gradients(sgd_grads)
        if KFAC_DEBUG:
            with tf.control_dependencies([sgd_step_op, coldOptim_op]):
                sgd_step_op = tf.Print(
                    sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
        return tf.group(*[sgd_step_op, coldOptim_op])

    kfacOptim_op, qr = self.apply_gradients_kfac(grads)

    def warmKFACstart():
        return kfacOptim_op

    return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
Example #29
Source File: tf_logits.py From Black-Box-Audio with MIT License
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i + 19 * 26]
                         for i in range(0, features.shape[1] - 19 * 26 + 1, 26)], 1)
    features = tf.reshape(features, [batch_size, -1, 19 * 26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0, 1, 2])
    features = (features - mean) / (var ** .5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0] * 10)

    return logits
Example #30
Source File: kfac.py From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
    updateOps = []
    # obtain the stats var list
    for stats_var in statsUpdates:
        stats_new = statsUpdates[stats_var]
        if accumulate:
            # simple superbatch averaging
            update_op = tf.assign_add(
                stats_var, accumulateCoeff * stats_new, use_locking=True)
        else:
            # exponential running averaging
            update_op = tf.assign(
                stats_var, stats_var * self._stats_decay, use_locking=True)
            update_op = tf.assign_add(
                update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
        updateOps.append(update_op)

    with tf.control_dependencies(updateOps):
        stats_step_op = tf.assign_add(self.stats_step, 1)

        if KFAC_DEBUG:
            stats_step_op = (tf.Print(stats_step_op,
                                      [tf.convert_to_tensor('step:'),
                                       self.global_step,
                                       tf.convert_to_tensor('fac step:'),
                                       self.factor_step,
                                       tf.convert_to_tensor('sgd step:'),
                                       self.sgd_step,
                                       tf.convert_to_tensor('Accum:'),
                                       tf.convert_to_tensor(accumulate),
                                       tf.convert_to_tensor('Accum coeff:'),
                                       tf.convert_to_tensor(accumulateCoeff),
                                       tf.convert_to_tensor('stat step:'),
                                       self.stats_step, updateOps[0], updateOps[1]]))

    return [stats_step_op, ]