Python tensorflow.compat.v1.name_scope() Examples
The following are 30 code examples of tensorflow.compat.v1.name_scope(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
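Before the project-specific examples below, here is a minimal sketch of what tf.compat.v1.name_scope() does on its own: ops created inside the context manager get the scope name prepended to their names, which is what the examples rely on for grouping ops (e.g., in TensorBoard). The scope and op names are illustrative, not taken from any project below.

import tensorflow.compat.v1 as tf

# Minimal sketch: inside a graph, ops created under a name_scope get the
# scope prepended to their names.
with tf.Graph().as_default():
  with tf.name_scope("preprocess"):
    x = tf.constant(1.0, name="x")
    y = tf.add(x, 1.0, name="y")
  print(y.op.name)  # -> "preprocess/y"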
Example #1
Source File: expert_utils.py from tensor2tensor, Apache License 2.0 (6 votes)

def add_scope(scope=None, scope_fn=None):
  """Return a decorator which adds a TF name/variable scope to a function.

  Note that the function returned by the decorator accepts an additional
  'name' parameter, which can overwrite the name scope given when the
  function is created.

  Args:
    scope (str): name of the scope. If None, the function name is used.
    scope_fn (fct): Either tf.name_scope or tf.variable_scope

  Returns:
    fct: the add_scope decorator
  """
  def decorator(f):

    @functools.wraps(f)
    def decorated(*args, **kwargs):
      name = kwargs.pop("name", None)  # Python 2 hack for keyword only args
      with scope_fn(name or scope or f.__name__):
        return f(*args, **kwargs)

    return decorated

  return decorator
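For illustration only, a hypothetical usage of the decorator above; the wrapped function, its scope name, and the dummy input are assumptions, not part of the tensor2tensor source, and a v1 graph context is assumed.

# Hypothetical usage sketch: assumes the add_scope helper above is in scope
# and that we are building a tf.compat.v1 graph.
@add_scope(scope="preprocess", scope_fn=tf.name_scope)
def preprocess(x):
  return tf.clip_by_value(x, 0.0, 1.0)

images = tf.zeros([8, 224, 224, 3])              # placeholder data for the sketch
y1 = preprocess(images)                          # ops created under "preprocess/"
y2 = preprocess(images, name="preprocess_eval")  # scope overridden per call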
Example #2
Source File: modalities.py from tensor2tensor, Apache License 2.0 (6 votes)

def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
  """Compute the CTC loss."""
  del model_hparams, vocab_size  # unused arg
  logits = top_out
  with tf.name_scope("ctc_loss", values=[logits, targets]):
    # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
    targets_shape = targets.get_shape().as_list()
    assert len(targets_shape) == 4
    assert targets_shape[2] == 1
    assert targets_shape[3] == 1
    targets = tf.squeeze(targets, axis=[2, 3])
    logits = tf.squeeze(logits, axis=[2, 3])
    targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
    targets_lengths = tf.reduce_sum(targets_mask, axis=1)
    sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
        targets, targets_lengths)
    xent = tf.nn.ctc_loss(
        sparse_targets,
        logits,
        targets_lengths,
        time_major=False,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=False)
    weights = weight_fn(targets)
    return tf.reduce_sum(xent), tf.reduce_sum(weights)
Example #3
Source File: epva.py from tensor2tensor, Apache License 2.0 (6 votes)

def calc_loss_psnr(gen_images, images, name, hparams=None, use_l1_loss=False):
  """Calculates loss and psnr for predictions over multiple timesteps."""
  del hparams
  with tf.name_scope(name):
    loss, error, psnr_all = 0.0, 0.0, 0.0
    for _, x, gx in zip(range(len(gen_images)), images, gen_images):
      recon_cost = mean_squared_error(x, gx)
      if use_l1_loss:
        recon_cost = l1_error(x, gx)

      error_i = l1_error(x, gx)
      psnr_i = peak_signal_to_noise_ratio(x, gx)
      psnr_all += psnr_i
      error += error_i
      loss += recon_cost

    psnr_all /= tf.to_float(len(gen_images))
    loss /= tf.to_float(len(gen_images))
    error /= tf.to_float(len(gen_images))

    # if not hparams.use_tpu:
    tf.summary.scalar('psnr_all', psnr_all)
    tf.summary.scalar('loss', loss)

    return loss, psnr_all
Example #4
Source File: allreduce.py from benchmarks, Apache License 2.0 (6 votes)

def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
    originally packed into gv, maybe following subsequent operations like
    reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv
Example #5
Source File: slicenet.py from tensor2tensor, Apache License 2.0 (6 votes)

def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
Example #6
Source File: model.py from benchmarks, Apache License 2.0 (6 votes)

def loss_function(self, inputs, build_network_result):
  """Returns the op to measure the loss of the model."""
  logits = build_network_result.logits
  _, labels = inputs
  # TODO(laigd): consider putting the aux logit in the Inception model,
  # which could call super.loss_function twice, once with the normal logits
  # and once with the aux logits.
  aux_logits = build_network_result.extra_info
  with tf.name_scope('xentropy'):
    mlperf.logger.log(key=mlperf.tags.MODEL_HP_LOSS_FN,
                      value=mlperf.tags.CCE)
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  if aux_logits is not None:
    with tf.name_scope('aux_xentropy'):
      aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
          logits=aux_logits, labels=labels)
      aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
      loss = tf.add_n([loss, aux_loss])
  return loss
Example #7
Source File: expert_utils.py from tensor2tensor, Apache License 2.0 (6 votes)

def remove(self, x):
  """Remove padding from the given tensor.

  Args:
    x (tf.Tensor): of shape [dim_origin,...]

  Returns:
    a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
  """
  with tf.name_scope("pad_reduce/remove"):
    x_shape = x.get_shape().as_list()
    x = tf.gather_nd(
        x,
        indices=self.nonpad_ids,
    )
    if not tf.executing_eagerly():
      # This is a hack but for some reason, gather_nd returns a tensor of
      # undefined shape, so the shape is set up manually
      x.set_shape([None] + x_shape[1:])
  return x
Example #8
Source File: preprocessing.py from benchmarks, Apache License 2.0 (6 votes)

def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
  """Decode a JPEG string into one 3-D float image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for op_scope.
  Returns:
    3-D float Tensor with values ranging from [0, 1).
  """
  # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
  # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
  with tf.name_scope(scope or 'decode_jpeg'):
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3,
                                 fancy_upscaling=False,
                                 dct_method='INTEGER_FAST')

    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')

    return image
Example #9
Source File: expert_utils.py from tensor2tensor, Apache License 2.0 (6 votes)

def restore(self, x):
  """Add padding back to the given tensor.

  Args:
    x (tf.Tensor): of shape [dim_compressed,...]

  Returns:
    a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The
    dim is restored from the original reference tensor
  """
  with tf.name_scope("pad_reduce/restore"):
    x = tf.scatter_nd(
        indices=self.nonpad_ids,
        updates=x,
        shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
    )
  return x
Example #10
Source File: seq2seq.py from magenta, Apache License 2.0 (6 votes)

def _call_sampler(sample_n_fn, sample_shape, name=None):
  """Reshapes vector of samples."""
  with tf.name_scope(name, "call_sampler", values=[sample_shape]):
    sample_shape = tf.convert_to_tensor(
        sample_shape, dtype=tf.int32, name="sample_shape")
    # Ensure sample_shape is a vector (vs just a scalar).
    pad = tf.cast(tf.equal(tf.rank(sample_shape), 0), tf.int32)
    sample_shape = tf.reshape(
        sample_shape,
        tf.pad(tf.shape(sample_shape),
               paddings=[[pad, 0]], constant_values=1))
    samples = sample_n_fn(tf.reduce_prod(sample_shape))
    batch_event_shape = tf.shape(samples)[1:]
    final_shape = tf.concat([sample_shape, batch_event_shape], 0)
    return tf.reshape(samples, final_shape)
Example #11
Source File: utils.py from lamb, Apache License 2.0 (6 votes)

def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Convert a length scalar to a vector of binary masks.

  This function will convert a vector of lengths to a matrix of binary masks.
  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]]

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks (int32).
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
    return tf.stop_gradient(cast_mask)
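A quick usage sketch for mask_from_lengths above, checking the docstring's [2, 4, 3] example; the throwaway graph and session are assumptions for illustration and not part of the lamb source.

# Illustrative check of the docstring example (assumes tf is tensorflow.compat.v1).
with tf.Graph().as_default(), tf.Session() as sess:
  mask = mask_from_lengths(tf.constant([2, 4, 3]), max_length=4)
  print(sess.run(mask))
  # [[1 1 0 0]
  #  [1 1 1 1]
  #  [1 1 1 0]]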
Example #12
Source File: neural_stack.py from tensor2tensor, Apache License 2.0 (6 votes)

def build_controller(self):
  """Create the RNN and output projections for controlling the stack.
  """
  with tf.name_scope("controller"):
    self.rnn = contrib.rnn().BasicRNNCell(self._num_units)
    self._input_proj = self.add_variable(
        "input_projection_weights",
        shape=[self._embedding_size * (self._num_read_heads + 1),
               self._num_units],
        dtype=self.dtype)
    self._input_bias = self.add_variable(
        "input_projection_bias",
        shape=[self._num_units],
        initializer=tf.zeros_initializer(dtype=self.dtype))
    self._push_proj, self._push_bias = self.add_scalar_projection(
        "push", self._num_write_heads)
    self._pop_proj, self._pop_bias = self.add_scalar_projection(
        "pop", self._num_write_heads)
    self._value_proj, self._value_bias = self.add_vector_projection(
        "value", self._num_write_heads)
    self._output_proj, self._output_bias = self.add_vector_projection(
        "output", 1)
Example #13
Source File: ppo_learner.py from tensor2tensor, Apache License 2.0 (6 votes)

def evaluate(self, env_fn, hparams, sampling_temp):
  with tf.Graph().as_default():
    with tf.name_scope("rl_eval"):
      eval_env = env_fn(in_graph=True)
      (collect_memory, _, collect_init) = _define_collect(
          eval_env,
          hparams,
          "ppo_eval",
          eval_phase=True,
          frame_stack_size=self.frame_stack_size,
          force_beginning_resets=False,
          sampling_temp=sampling_temp,
          distributional_size=self._distributional_size,
      )

      model_saver = tf.train.Saver(
          tf.global_variables(hparams.policy_network + "/.*")
          # tf.global_variables("clean_scope.*")  # Needed for sharing params.
      )

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        collect_init(sess)
        trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver,
                                       sess)
        sess.run(collect_memory)
Example #14
Source File: t2t_model.py from tensor2tensor, Apache License 2.0 (6 votes)

def summarize_features(features, num_shards=1):
  """Generate summaries for features."""
  if not common_layers.should_generate_summaries():
    return

  with tf.name_scope("input_stats"):
    for (k, v) in sorted(six.iteritems(features)):
      if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
          (v.dtype != tf.string)):
        tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
        tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
        nonpadding = tf.to_float(tf.not_equal(v, 0))
        nonpadding_tokens = tf.reduce_sum(nonpadding)
        tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
        tf.summary.scalar("%s_nonpadding_fraction" % k,
                          tf.reduce_mean(nonpadding))
Example #15
Source File: benchmark_cnn.py from benchmarks, Apache License 2.0 (6 votes)

def _benchmark_train(self):
  """Run cnn in benchmark mode. Skip the backward pass if forward_only is on.

  Returns:
    Dictionary containing training statistics (num_workers, num_steps,
    average_wall_time, images_per_sec).
  """
  graph = tf.Graph()
  with graph.as_default():
    build_result = self._build_graph()
    if self.mode == constants.BenchmarkMode.TRAIN_AND_EVAL:
      with self.variable_mgr.reuse_variables():
        with tf.name_scope('Evaluation') as ns:
          eval_build_results = self._build_eval_graph(ns)
    else:
      eval_build_results = None
  (graph, result_to_benchmark) = self._preprocess_graph(graph, build_result)
  with graph.as_default():
    return self._benchmark_graph(result_to_benchmark, eval_build_results)
Example #16
Source File: seq2seq.py from magenta, Apache License 2.0 (5 votes)

def next_inputs(self, time, outputs, state, sample_ids, name=None):
  with tf.name_scope(
      name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
    return self._next_inputs_fn(
        time=time, outputs=outputs, state=state, sample_ids=sample_ids)
Example #17
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Pool elements across the last dimension.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool can be used here, but reduce is cleaner
      output = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is not None:
        # Some elems are dummy elems so we can't just reduce the average.
        output = tf.reduce_sum(inputs, axis=1)
        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        output = tf.div(output, tf.maximum(num_elems, 1))
      else:
        output = tf.reduce_mean(inputs, axis=1)

  return output
Example #18
Source File: tf_utils.py from magenta, Apache License 2.0 (5 votes)

def log_loss(labels, predictions, epsilon=1e-7, scope=None, weights=None):
  """Calculate log losses.

  Same as tf.losses.log_loss except that this returns the individual losses
  instead of passing them into compute_weighted_loss and returning their
  weighted mean. This is useful for eval jobs that report the mean loss. By
  returning individual losses, that mean loss can be the same regardless of
  batch size.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    weights: Weights to apply to labels.

  Returns:
    A `Tensor` representing the loss values.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`.
  """
  with tf.name_scope(scope, "log_loss", (predictions, labels)):
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = -tf.multiply(labels, tf.log(predictions + epsilon)) - tf.multiply(
        (1 - labels), tf.log(1 - predictions + epsilon))
    if weights is not None:
      losses = tf.multiply(losses, weights)
    return losses
Example #19
Source File: train.py from magenta, Apache License 2.0 (5 votes)

def main(unused_argv):
  tf.logging.set_verbosity(FLAGS.log)

  if not tf.gfile.Exists(FLAGS.logdir):
    tf.gfile.MakeDirs(FLAGS.logdir)

  with tf.Graph().as_default():
    # If ps_tasks is 0, the local device is used. When using multiple
    # (non-local) replicas, the ReplicaDeviceSetter distributes the variables
    # across the different devices.
    model = utils.get_module("baseline.models.%s" % FLAGS.model)
    hparams = model.get_hparams(FLAGS.config)

    # Run the Reader on the CPU
    if FLAGS.ps_tasks:
      cpu_device = "/job:worker/cpu:0"
    else:
      cpu_device = "/job:localhost/replica:0/task:0/cpu:0"

    with tf.device(cpu_device):
      with tf.name_scope("Reader"):
        batch = reader.NSynthDataset(
            FLAGS.train_path, is_training=True).get_baseline_batch(hparams)

    with tf.device(tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks)):
      train_op = model.train_op(batch, hparams, FLAGS.config)

      # Run training
      slim.learning.train(
          train_op=train_op,
          logdir=FLAGS.logdir,
          master=FLAGS.master,
          is_chief=FLAGS.task == 0,
          number_of_steps=hparams.max_steps,
          save_summaries_secs=FLAGS.save_summaries_secs,
          save_interval_secs=FLAGS.save_interval_secs)
Example #20
Source File: seq2seq.py from magenta, Apache License 2.0 (5 votes)

def sample(self, time, outputs, state, name=None):
  with tf.name_scope(
      name, "%sSample" % type(self).__name__, (time, outputs, state)):
    return self._sample_fn(time=time, outputs=outputs, state=state)
Example #21
Source File: seq2seq.py from magenta, Apache License 2.0 (5 votes)

def __init__(self, inputs, sequence_length, time_major=False, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    time_major: Python bool. Whether the tensors in `inputs` are time major.
      If `False` (default), they are assumed to be batch major.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sequence_length` is not a 1D tensor.
  """
  with tf.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
    inputs = tf.convert_to_tensor(inputs, name="inputs")
    self._inputs = inputs
    if not time_major:
      inputs = tf.nest.map_structure(_transpose_batch_time, inputs)

    self._input_tas = tf.nest.map_structure(_unstack_ta, inputs)
    self._sequence_length = tf.convert_to_tensor(
        sequence_length, name="sequence_length")
    if self._sequence_length.get_shape().ndims != 1:
      raise ValueError(
          "Expected sequence_length to be a vector, but received shape: %s" %
          self._sequence_length.get_shape())

    self._zero_inputs = tf.nest.map_structure(
        lambda inp: tf.zeros_like(inp[0, :]), inputs)

    self._batch_size = tf.size(sequence_length)
Example #22
Source File: latent_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, which is an upper bound on the negative likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    data_dim = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim = tf.cast(latent_dim, average_prior.dtype)
    negative_log_likelihood = data_dim * average_reconstruction
    negative_log_prior = latent_dim * average_prior
    negative_elbo = negative_log_likelihood + negative_log_prior
    nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim
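The math inside the scope above reduces to a few scalar operations; here is a plain-NumPy sketch with made-up numbers (the dimensions and per-dimension averages are illustrative only, not taken from tensor2tensor).

import numpy as np

# Illustrative values: a 32x32x3 image (3072 subpixels), a 64-d latent code,
# and per-dimension averages measured in nats.
data_dim, latent_dim = 3072.0, 64.0
average_reconstruction, average_prior = 2.1, 0.5

negative_elbo = data_dim * average_reconstruction + latent_dim * average_prior
nats_per_dim = negative_elbo / data_dim         # ~= 2.11 nats per subpixel
bits_per_dim = nats_per_dim / np.log(2.0)       # ~= 3.04 bits per subpixel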
Example #23
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def pad_with_zeros(logits, labels):
  """Pad labels on the length dimension to match logits length."""
  with tf.name_scope("pad_with_zeros", values=[logits, labels]):
    logits, labels = pad_to_same_length(logits, labels)
    if len(labels.shape) == 3:  # 2-d labels.
      logits, labels = pad_to_same_length(logits, labels, axis=2)
    return logits, labels
Example #24
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
  """Pad tensors x and y on axis 1 so that they have the same length."""
  if axis not in [1, 2]:
    raise ValueError("Only axis=1 and axis=2 supported for now.")
  with tf.name_scope("pad_to_same_length", values=[x, y]):
    x_length = shape_list(x)[axis]
    y_length = shape_list(y)[axis]
    if (isinstance(x_length, int) and isinstance(y_length, int) and
        x_length == y_length and final_length_divisible_by == 1):
      return x, y
    max_length = tf.maximum(x_length, y_length)
    if final_length_divisible_by > 1:
      # Find the nearest larger-or-equal integer divisible by given number.
      max_length += final_length_divisible_by - 1
      max_length //= final_length_divisible_by
      max_length *= final_length_divisible_by
    length_diff1 = max_length - x_length
    length_diff2 = max_length - y_length

    def padding_list(length_diff, arg):
      if axis == 1:
        return [[[0, 0], [0, length_diff]],
                tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
      return [[[0, 0], [0, 0], [0, length_diff]],
              tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]

    paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
    paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
    res_x = tf.pad(x, paddings1)
    res_y = tf.pad(y, paddings2)
    # Static shapes are the same except for axis=1.
    x_shape = x.shape.as_list()
    x_shape[axis] = None
    res_x.set_shape(x_shape)
    y_shape = y.shape.as_list()
    y_shape[axis] = None
    res_y.set_shape(y_shape)
    return res_x, res_y
Example #25
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def noam_norm(x, epsilon=1.0, name=None):
  """One version of layer normalization."""
  with tf.name_scope(name, default_name="noam_norm", values=[x]):
    shape = x.get_shape()
    ndims = len(shape)
    return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) *
            tf.sqrt(to_float(shape[-1])))
Example #26
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def standardize_images(x):
  """Image standardization on batches and videos."""
  with tf.name_scope("standardize_images", values=[x]):
    x_shape = shape_list(x)
    x = to_float(tf.reshape(x, [-1] + x_shape[-3:]))
    x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    x_variance = tf.reduce_mean(
        tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True)
    num_pixels = to_float(x_shape[-2] * x_shape[-3])
    x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
    return tf.reshape(x, x_shape)
Example #27
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def convert_real_to_rgb(x):
  """Conversion of real numbers to pixel values."""
  with tf.name_scope("real_to_rgb", values=[x]):
    x *= 255.0
    return x
Example #28
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def convert_rgb_to_symmetric_real(x):
  """Conversion of pixel values to real numbers."""
  with tf.name_scope("rgb_to_real", values=[x]):
    x = to_float(x)
    # Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in
    # the range [-1, 1].
    x = (x / 127.5) - 1
    return x
Example #29
Source File: common_layers.py from tensor2tensor, Apache License 2.0 (5 votes)

def convert_rgb_to_real(x):
  """Conversion of pixel values to real numbers."""
  with tf.name_scope("rgb_to_real", values=[x]):
    x = to_float(x)
    x /= 255.0
    return x
Example #30
Source File: modalities.py from tensor2tensor, Apache License 2.0 (5 votes)

def real_log_poisson_loss(top_out,
                          targets,
                          model_hparams,
                          vocab_size,
                          weights_fn):
  """Poisson loss for real."""
  del model_hparams, vocab_size  # unused arg
  predictions = top_out
  if (len(common_layers.shape_list(top_out)) != len(
      common_layers.shape_list(targets))):
    predictions = tf.squeeze(top_out, axis=[-1])
  with tf.name_scope("log_possion"):
    weights = weights_fn(targets)
    lp_loss = tf.nn.log_poisson_loss(targets, predictions)
    return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)