Python tensorflow.to_int64() Examples
The following are 30 code examples of tensorflow.to_int64(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
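tf.to_int64() is TF 1.x shorthand for tf.cast(x, tf.int64); it was deprecated in TF 1.x and removed in TF 2.x. A minimal sketch of the cast semantics (assuming a TF 1.x session; the values below are illustrative):

import tensorflow as tf

x = tf.constant([1.7, 2.3, -3.9])
y = tf.to_int64(x)        # deprecated shorthand; truncates toward zero
z = tf.cast(x, tf.int64)  # equivalent, and the TF 2.x replacement

with tf.Session() as sess:
    print(sess.run([y, z]))  # [array([ 1,  2, -3]), array([ 1,  2, -3])]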
Example #1
Source File: common_layers.py From fine-lm with MIT License | 6 votes |
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for d in prefix_shape:
    prefix_size *= d

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  flat_indices = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, flat_indices)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)

  return predictions, scores
Example #2
Source File: lstm.py From iwcs2017-answer-selection with Apache License 2.0 | 6 votes |
def bilstm_representation_raw(self, item, indices, re_use_lstm, name='lstm'):
    """Creates a representation graph which retrieves a text item
    (represented by its word embeddings) and returns a vector-representation

    :param item: the text item. Can be question or (good/bad) answer
    :param indices: token indices of the text item, used to derive the
        actual sequence length from the non-zero tokens
    :param re_use_lstm: should be False for the first call, True for all
        subsequent ones to get the same lstm variables
    :return: representation tensor
    """
    tensor_non_zero_token = non_zero_tokens(tf.to_float(indices))
    sequence_length = tf.to_int64(tf.reduce_sum(tensor_non_zero_token, 1))

    with tf.variable_scope(name, reuse=re_use_lstm):
        output, _last = tf.nn.bidirectional_dynamic_rnn(
            self.lstm_cell_forward,
            self.lstm_cell_backward,
            item,
            dtype=tf.float32,
            sequence_length=sequence_length
        )
        # tf.concat(values, axis) argument order as of TF 1.0; the original
        # (TF 0.x) code called tf.concat(2, output).
        return tf.concat(output, 2)
Example #3
Source File: imgconvnets.py From DmsMsgRcg with Apache License 2.0 | 6 votes |
def _build_training_graph(self, logits, labels, learning_rate):
    """Build the training graph.

    Args:
        logits: Logits tensor, float - [batch_size, class_count].
        labels: Labels tensor, int32 - [batch_size], with values in the
            range [0, class_count).
        learning_rate: The learning rate for the optimization.
    Returns:
        train_op: The Op for training.
        loss: The Op for calculating loss.
        accuracy: The Op for calculating accuracy.
    """
    # Create an operation that calculates loss.
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    correct_predict = tf.nn.in_top_k(logits, labels, 1)
    accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

    return train_op, loss, accuracy
Example #4
Source File: common_layers.py From BERT with Apache License 2.0 | 6 votes |
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for d in prefix_shape:
    prefix_size *= d

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  flat_indices = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, flat_indices)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)

  return predictions, scores
Example #5
Source File: RankLSTM_model.py From Deep-Listwise-Context-Model-for-Ranking-Refinement with Apache License 2.0 | 6 votes |
def listMLE(self, output, target_indexs, target_rels, name=None):
    loss = None
    with ops.name_scope(name, "listMLE",
                        [output] + target_indexs + target_rels):
        output = tf.nn.l2_normalize(output, 1)
        loss = -1.0 * math_ops.reduce_sum(output, 1)
        exp_output = tf.exp(output)
        exp_output_table = tf.reshape(exp_output, [-1])
        sum_exp_output = math_ops.reduce_sum(exp_output, 1)
        loss = tf.add(loss, tf.log(sum_exp_output))
        # Compute MLE.
        for i in xrange(self.rank_list_size - 1):
            idx = target_indexs[i] + tf.to_int64(self.batch_index_bias)
            y_i = embedding_ops.embedding_lookup(exp_output_table, idx)
            # y_i = tf.gather_nd(exp_output, idx)
            sum_exp_output = tf.subtract(sum_exp_output, y_i)
            loss = tf.add(loss, tf.log(sum_exp_output))
        batch_size = tf.shape(target_rels[0])[0]
        return math_ops.reduce_sum(loss) / math_ops.cast(batch_size,
                                                         dtypes.float32)
Example #6
Source File: imagenet.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def preprocess_example(self, example, mode, unused_hparams):
  example["inputs"].set_shape(
      [_IMAGENET_MEDIUM_IMAGE_SIZE, _IMAGENET_MEDIUM_IMAGE_SIZE, 3])
  example["inputs"] = tf.to_int64(example["inputs"])
  return example
Example #7
Source File: estimator.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def build_graph_dist_strategy(self, features, labels, mode, params):
  """Model function."""
  del labels, params
  misc_utils.print_out("Running dist_strategy mode_fn")

  hparams = self.hparams

  # Create a GNMT model for training.
  # assert (hparams.encoder_type == "gnmt" or
  #         hparams.attention_architecture in ["gnmt", "gnmt_v2"])
  with mixed_precision_scope():
    model = gnmt_model.GNMTModel(hparams, mode=mode, features=features)
    if mode == tf.contrib.learn.ModeKeys.INFER:
      sample_ids = model.sample_id
      reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
          hparams.tgt_vocab_file, default_value=vocab_utils.UNK)
      sample_words = reverse_target_vocab_table.lookup(
          tf.to_int64(sample_ids))
      # Make sure outputs is of shape [batch_size, time] or [beam_width,
      # batch_size, time] when using beam search.
      if hparams.time_major:
        sample_words = tf.transpose(sample_words)
      elif sample_words.shape.ndims == 3:
        # Beam search output in [batch_size, time, beam_width] shape.
        sample_words = tf.transpose(sample_words, [2, 0, 1])
      predictions = {"predictions": sample_words}
      # Return loss, vars, grads, predictions, train_op, scaffold.
      return None, None, None, predictions, None, None
    elif mode == tf.contrib.learn.ModeKeys.TRAIN:
      loss = model.train_loss
      train_op = model.update
      return loss, model.params, model.grads, None, train_op, None
    else:
      raise ValueError("Unknown mode in model_fn: %s" % mode)
Example #8
Source File: model.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def create_attention_mechanism(
    num_units, memory, source_sequence_length, dtype=None):
  """Create attention mechanism based on the attention_option."""
  # Mechanism
  attention_mechanism = attention_wrapper.BahdanauAttention(
      num_units,
      memory,
      memory_sequence_length=tf.to_int64(source_sequence_length),
      normalize=True,
      dtype=dtype)
  return attention_mechanism
Example #9
Source File: estimator.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def _convert_ids_to_strings(tgt_vocab_file, ids):
  """Convert prediction ids to words."""
  with tf.Session() as sess:
    reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    sess.run(tf.tables_initializer())
    translations = sess.run(
        reverse_target_vocab_table.lookup(
            tf.to_int64(tf.convert_to_tensor(np.asarray(ids)))))
  return translations
Example #10
Source File: imagenet.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def preprocess_example(self, example, mode, _):
  # Just resize with area.
  if self._was_reversed:
    example["inputs"] = tf.to_int64(
        tf.image.resize_images(example["inputs"], self.rescale_size,
                               tf.image.ResizeMethod.AREA))
  else:
    example = imagenet_preprocess_example(example, mode)
    example["inputs"] = tf.to_int64(
        tf.image.resize_images(example["inputs"], self.rescale_size))
  return example
Example #11
Source File: model.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def create_attention_mechanism(
    num_units, memory, source_sequence_length, dtype=None):
  """Create attention mechanism based on the attention_option."""
  # Mechanism
  attention_mechanism = attention_wrapper.BahdanauAttention(
      num_units,
      memory,
      memory_sequence_length=tf.to_int64(source_sequence_length),
      normalize=True,
      dtype=dtype)
  return attention_mechanism
Example #12
Source File: imagenet.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def preprocess_example(self, example, mode, unused_hparams):
  example["inputs"].set_shape(
      [_IMAGENET_SMALL_IMAGE_SIZE, _IMAGENET_SMALL_IMAGE_SIZE, 3])
  example["inputs"] = tf.to_int64(example["inputs"])
  return example
Example #13
Source File: data.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def metric_fn(answers, prediction, start, end, yp1, yp2, num_answers):
  """Compute span accuracies and token F1/EM scores."""
  yp1 = tf.expand_dims(yp1, -1)
  yp2 = tf.expand_dims(yp2, -1)
  answer_mask = tf.sequence_mask(num_answers)

  start = tf.to_int64(start)
  end = tf.to_int64(end)
  start_correct = tf.reduce_any(tf.equal(start, yp1) & answer_mask, 1)
  end_correct = tf.reduce_any(tf.equal(end, yp2) & answer_mask, 1)
  correct = start_correct & end_correct

  em = tf.py_func(
      enum_fn(_exact_match_score, dtype='float32'),
      [prediction, answers, answer_mask], 'float32')
  f1 = tf.py_func(
      enum_fn(_f1_score, dtype='float32'),
      [prediction, answers, answer_mask], 'float32')

  eval_metric_ops = {
      # TODO(ddohan): Add other useful metrics
      'acc_start': tf.metrics.mean(tf.cast(start_correct, 'float')),
      'acc_end': tf.metrics.mean(tf.cast(end_correct, 'float')),
      'acc_span': tf.metrics.mean(tf.cast(correct, 'float')),
      'em': tf.metrics.mean(em),
      'f1': tf.metrics.mean(f1),
      # Number of questions processed
      'num_question': tf.metrics.true_positives(
          tf.ones([tf.shape(prediction)[0]]),
          tf.ones([tf.shape(prediction)[0]]))
  }
  return eval_metric_ops
Example #14
Source File: estimator.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def _convert_ids_to_strings(tgt_vocab_file, ids):
  """Convert prediction ids to words."""
  with tf.Session() as sess:
    reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    sess.run(tf.tables_initializer())
    translations = sess.run(
        reverse_target_vocab_table.lookup(
            tf.to_int64(tf.convert_to_tensor(np.asarray(ids)))))
  return translations
Example #15
Source File: model.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def create_attention_mechanism(
    num_units, memory, source_sequence_length, dtype=None):
  """Create attention mechanism based on the attention_option."""
  # Mechanism
  attention_mechanism = attention_wrapper.BahdanauAttention(
      num_units,
      memory,
      memory_sequence_length=tf.to_int64(source_sequence_length),
      normalize=True,
      dtype=dtype)
  return attention_mechanism
Example #16
Source File: estimator.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def build_graph_dist_strategy(self, features, labels, mode, params):
  """Model function."""
  del labels, params
  misc_utils.print_out("Running dist_strategy mode_fn")

  hparams = self.hparams

  # Create a GNMT model for training.
  # assert (hparams.encoder_type == "gnmt" or
  #         hparams.attention_architecture in ["gnmt", "gnmt_v2"])
  with mixed_precision_scope():
    model = gnmt_model.GNMTModel(hparams, mode=mode, features=features)
    if mode == tf.contrib.learn.ModeKeys.INFER:
      sample_ids = model.sample_id
      reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
          hparams.tgt_vocab_file, default_value=vocab_utils.UNK)
      sample_words = reverse_target_vocab_table.lookup(
          tf.to_int64(sample_ids))
      # Make sure outputs is of shape [batch_size, time] or [beam_width,
      # batch_size, time] when using beam search.
      if hparams.time_major:
        sample_words = tf.transpose(sample_words)
      elif sample_words.shape.ndims == 3:
        # Beam search output in [batch_size, time, beam_width] shape.
        sample_words = tf.transpose(sample_words, [2, 0, 1])
      predictions = {"predictions": sample_words}
      # Return loss, vars, grads, predictions, train_op, scaffold.
      return None, None, None, predictions, None, None
    elif mode == tf.contrib.learn.ModeKeys.TRAIN:
      loss = model.train_loss
      train_op = model.update
      return loss, model.params, model.grads, None, train_op, None
    else:
      raise ValueError("Unknown mode in model_fn: %s" % mode)
Example #17
Source File: image_utils.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def make_multiscale_dilated(image, resolutions, num_channels=3):
  """Returns list of scaled images, one for each resolution.

  Resizes by skipping every nth pixel.

  Args:
    image: Tensor of shape [height, height, num_channels].
    resolutions: List of heights that image's height is resized to. The
      function assumes VALID padding, so the original image's height must be
      divisible by each resolution's height to return the exact resolution
      size.
    num_channels: Number of channels in image.

  Returns:
    List of Tensors, one for each resolution with shape given by
    [resolutions[i], resolutions[i], num_channels] if resolutions properly
    divide the original image's height; otherwise shape height and width is
    up to valid skips.
  """
  image_height = common_layers.shape_list(image)[0]
  scaled_images = []
  for height in resolutions:
    dilation_rate = image_height // height  # assuming height = width
    scaled_image = image[::dilation_rate, ::dilation_rate]
    scaled_image = tf.to_int64(scaled_image)
    scaled_image.set_shape([None, None, num_channels])
    scaled_images.append(scaled_image)
  return scaled_images
Example #18
Source File: common_layers.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def index_last_dim_with_indices(x, indices):
  """Use indices to index into the last axis of x.

  This can be useful for recovering the actual probabilities of a sample
  from a probability distribution.

  Args:
    x: Tensor, n-d.
    indices: Tensor, (n-1)-d, where the dimension sizes match the first
      (n-1) dimensions of x. The values of indices will be used to index
      into the last axis of x.

  Returns:
    Tensor, (n-1)-d.
  """
  assert len(x.shape) == len(indices.shape) + 1

  x_shape = shape_list(x)
  vocab_size = x_shape[-1]

  flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
  flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])

  idx = tf.stack(
      [
          tf.range(tf.to_int64(shape_list(flat_indices)[0])),
          tf.to_int64(flat_indices)
      ],
      axis=1)
  flat_x_idx = tf.gather_nd(flat_x, idx)

  x_idx = tf.reshape(flat_x_idx, x_shape[:-1])

  return x_idx
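A hypothetical usage sketch for index_last_dim_with_indices (the tensors and values are illustrative; assumes TF 1.x with shape_list and list_product from the same common_layers module in scope):

# Recover the probability assigned to each sampled index.
probs = tf.constant([[0.1, 0.9],
                     [0.7, 0.3]])               # [batch, vocab]
samples = tf.constant([1, 0], dtype=tf.int64)   # [batch]
picked = index_last_dim_with_indices(probs, samples)
# picked evaluates to [0.9, 0.7]: the probability of each sampled index.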
Example #19
Source File: estimator.py From training_results_v0.5 with Apache License 2.0 | 5 votes |
def _convert_ids_to_strings(tgt_vocab_file, ids):
  """Convert prediction ids to words."""
  with tf.Session() as sess:
    reverse_target_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)
    sess.run(tf.tables_initializer())
    translations = sess.run(
        reverse_target_vocab_table.lookup(
            tf.to_int64(tf.convert_to_tensor(np.asarray(ids)))))
  return translations
Example #20
Source File: realmix.py From realmix with Apache License 2.0 | 5 votes |
def percent_confidence_mask_unsup(self, logits_y, labels_y, loss_l2u):
    # Adapted from google-research/uda/image/main.py
    # This function masks the unsupervised predictions that are below
    # a set confidence threshold. Note the following will only work
    # using MSE loss and not KL-divergence.

    # Calculate largest predicted probability for each image.
    unsup_prob = tf.nn.softmax(logits_y, axis=-1)
    largest_prob = tf.reduce_max(unsup_prob, axis=-1)

    # Get the indices of the bottom x% of probabilities and mask those out.
    # In other words, get the probability of the image with the
    # x% * num_of_samples lowest probability and use that as the mask.
    # Calculate the current confidence_mask value using the specified schedule:
    sorted_probs = tf.sort(largest_prob, axis=-1, direction='ASCENDING')
    sort_index = tf.math.multiply(
        tf.to_float(tf.shape(sorted_probs)[0]), FLAGS.percent_mask)
    curr_confidence_mask = tf.slice(
        sorted_probs, [tf.to_int64(sort_index)], [1])

    # Mask the loss for images that don't contain a predicted
    # probability above the threshold.
    loss_mask = tf.cast(
        tf.greater(largest_prob, curr_confidence_mask), tf.float32)
    # The ratio of unlabeled images above the threshold.
    tf.summary.scalar('losses/high_prob_ratio', tf.reduce_mean(loss_mask))
    tf.summary.scalar('losses/percent_confidence_mask',
                      tf.reshape(curr_confidence_mask, []))
    loss_mask = tf.stop_gradient(loss_mask)
    loss_l2u = loss_l2u * tf.expand_dims(loss_mask, axis=-1)

    # Return the average unsupervised loss.
    avg_unsup_loss = (tf.reduce_sum(loss_l2u) /
                      tf.maximum(tf.reduce_sum(loss_mask) * FLAGS.nclass, 1))
    return avg_unsup_loss
Example #21
Source File: fsns.py From yolo_v2 with Apache License 2.0 | 5 votes |
def tensors_to_item(self, keys_to_tensors):
  return tf.to_int64(
      self._num_of_views * keys_to_tensors[self._original_width_key] /
      keys_to_tensors[self._width_key])
Example #22
Source File: model.py From yolo_v2 with Apache License 2.0 | 5 votes |
def get_text(self, ids):
  """Returns a string corresponding to a sequence of character ids.

  Args:
    ids: a tensor with shape [batch_size, max_sequence_length]
  """
  return tf.reduce_join(
      self.table.lookup(tf.to_int64(ids)), reduction_indices=1)
Example #23
Source File: RankLSTM_model.py From Deep-Listwise-Context-Model-for-Ranking-Refinement with Apache License 2.0 | 5 votes |
def _extract_argmax_and_embed(self, embedding, output_projection=None,
                              update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.

    Args:
        embedding: embedding tensor for symbols.
        output_projection: None or a pair (W, B). If provided, each fed
            previous output will first be multiplied by W and added B.
        update_embedding: Boolean; if False, the gradients will not
            propagate through the embeddings.

    Returns:
        A loop function.
    """
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(
                prev, output_projection[0], output_projection[1])
        prev_symbol = math_ops.argmax(prev, 1) + tf.to_int64(
            self.batch_index_bias)
        # Note that gradients will not propagate through the second
        # parameter of embedding_lookup.
        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = tf.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
Example #24
Source File: models.py From ML_CIA with MIT License | 5 votes |
def __init__(self, input_dim=None, output_dim=1, factor_order=10,
             init_path=None, opt_algo='gd', learning_rate=1e-2,
             l2_w=0, l2_v=0, random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        w = self.vars['w']
        v = self.vars['v']
        b = self.vars['b']

        X_square = tf.SparseTensor(
            self.X.indices, tf.square(self.X.values),
            tf.to_int64(tf.shape(self.X)))
        xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, v))
        p = 0.5 * tf.reshape(
            tf.reduce_sum(
                xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(v)),
                1),
            [-1, output_dim])
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b + p, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(xw) + \
            l2_v * tf.nn.l2_loss(xv)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate,
                                             self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Example #25
Source File: evaluation.py From TF-SegNet with MIT License | 5 votes |
def evaluation(logits, labels):
  labels = tf.to_int64(labels)
  correct_prediction = tf.equal(tf.argmax(logits, 3), labels)
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', accuracy)
  return accuracy
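A hedged wiring sketch for the evaluation op above (assumes TF 1.x; the batch size, 360x480 resolution, and 11 classes are illustrative assumptions, not values from the source):

# Per-pixel accuracy over a batch of segmentation maps.
logits = tf.random_normal([4, 360, 480, 11])  # [batch, height, width, classes]
labels = tf.random_uniform([4, 360, 480], maxval=11, dtype=tf.int32)
acc = evaluation(logits, labels)              # scalar mean pixel accuracy
with tf.Session() as sess:
    print(sess.run(acc))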
Example #26
Source File: context_encoder.py From neural-el with Apache License 2.0 | 5 votes |
def get_last_output(self, outputs, lengths, name):
    reverse_output = tf.reverse_sequence(input=outputs,
                                         seq_lengths=tf.to_int64(lengths),
                                         seq_dim=1,
                                         batch_dim=0)
    en_last_output = tf.slice(input_=reverse_output,
                              begin=[0, 0, 0],
                              size=[self.batch_size, 1, -1])
    # [batch_size, h_dim]
    encoder_last_output = tf.reshape(en_last_output,
                                     shape=[self.batch_size, -1],
                                     name=name)
    return encoder_last_output
Example #27
Source File: mnist.py From MachineLearning with Apache License 2.0 | 5 votes |
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  # Keyword arguments are required for this op as of TF 1.0.
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
Example #28
Source File: mnist_simple.py From MachineLearning with Apache License 2.0 | 5 votes |
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  # Keyword arguments are required for this op as of TF 1.0.
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
Example #29
Source File: imagenet.py From BERT with Apache License 2.0 | 5 votes |
def preprocess_example(self, example, mode, unused_hparams):
  example["inputs"].set_shape(
      [_IMAGENET_MEDIUM_IMAGE_SIZE, _IMAGENET_MEDIUM_IMAGE_SIZE, 3])
  example["inputs"] = tf.to_int64(example["inputs"])
  example["inputs"] = tf.reshape(example["inputs"], (-1,))
  del example["targets"]  # Ensure unconditional generation
  return example
Example #30
Source File: layers.py From ARU-Net with GNU General Public License v2.0 | 5 votes |
def horizontal_cell(images, num_filters_out, cell_fw, cell_bw,
                    keep_prob=1.0, scope=None):
  """Run an LSTM bidirectionally over all the rows of each image.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with tf.variable_scope(scope, "HorizontalGru", [images]):
    sequence = images_to_sequence(images)

    shapeT = tf.shape(sequence)
    sequence_length = shapeT[0]
    batch_sizeRNN = shapeT[1]
    sequence_lengths = tf.to_int64(
        tf.fill([batch_sizeRNN], sequence_length))
    forward_drop1 = DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
    backward_drop1 = DropoutWrapper(cell_bw, output_keep_prob=keep_prob)
    rnn_out1, _ = tf.nn.bidirectional_dynamic_rnn(
        forward_drop1, backward_drop1, sequence,
        dtype=tf.float32, sequence_length=sequence_lengths,
        time_major=True, swap_memory=True, scope=scope)
    rnn_out1 = tf.concat(rnn_out1, 2)
    rnn_out1 = tf.reshape(rnn_out1,
                          shape=[-1, batch_sizeRNN, 2, num_filters_out])
    output_sequence = tf.reduce_sum(rnn_out1, axis=2)
    batch_size = tf.shape(images)[0]
    output = sequence_to_images(output_sequence, batch_size)
  return output