Python tensorflow.python.ops.array_ops.transpose() Examples
The following are 30 code examples of tensorflow.python.ops.array_ops.transpose(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.array_ops, or try the search function.
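Before the examples, here is a minimal sketch (not taken from any project below) of what array_ops.transpose does: it permutes the dimensions of a tensor according to `perm`, reversing them when `perm` is omitted. The internal array_ops.transpose mirrors the public tf.transpose API.

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])             # shape [2, 3]
y = array_ops.transpose(x)               # perm omitted: dims reversed, shape [3, 2]
z = array_ops.transpose(x, perm=[1, 0])  # explicit perm, same shape [3, 2] here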
Example #1
Source File: pooling.py From lambda-packs with MIT License | 6 votes |
def call(self, inputs):
  pool_shape = (1,) + self.pool_size + (1,)
  strides = (1,) + self.strides + (1,)

  if self.data_format == 'channels_first':
    # TF does not support `channels_first` with 3D pooling operations,
    # so we must handle this case manually.
    # TODO(fchollet): remove this when TF pooling is feature-complete.
    inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))

  outputs = self.pool_function(
      inputs,
      ksize=pool_shape,
      strides=strides,
      padding=self.padding.upper())

  if self.data_format == 'channels_first':
    outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
  return outputs
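As a quick illustration of the data-format shuffle in Example #1 (a sketch with made-up shapes, not part of the original file): for a 5-D channels_first input [batch, channels, depth, height, width], perm (0, 2, 3, 4, 1) moves the channel axis to the end so TF's channels_last pooling can run, and (0, 4, 1, 2, 3) moves it back.

import tensorflow as tf
from tensorflow.python.ops import array_ops

inputs = tf.zeros([8, 16, 4, 32, 32])                           # NCDHW
channels_last = array_ops.transpose(inputs, (0, 2, 3, 4, 1))    # shape [8, 4, 32, 32, 16]
restored = array_ops.transpose(channels_last, (0, 4, 1, 2, 3))  # shape [8, 16, 4, 32, 32]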
Example #2
Source File: core.py From lambda-packs with MIT License | 6 votes |
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
  """Impose desired axis order on a labeled tensor.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. If not
      provided, defaults to the current axis_order_scope (if set).
    name: Optional op name.

  Returns:
    Labeled tensor with possibly transposed axes.

  Raises:
    AxisOrderError: If no axis_order is provided or axis_order does not contain
      all axes on the input tensor.
  """
  with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    if axis_order is None:
      axis_order = _get_valid_axis_order()

    relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]

    return transpose(labeled_tensor, relevant_axis_order, name=scope)
Example #3
Source File: mvn.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _sample_n(self, n, seed=None):
  # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
  shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
  white_samples = random_ops.random_normal(
      shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)

  correlated_samples = self._cov.sqrt_matmul(white_samples)

  # Move the last dimension to the front
  perm = array_ops.concat(
      (array_ops.stack([array_ops.rank(correlated_samples) - 1]),
       math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)

  # TODO(ebrevdo): Once we get a proper tensor contraction op,
  # perform the inner product using that instead of batch_matmul
  # and this slow transpose can go away!
  correlated_samples = array_ops.transpose(correlated_samples, perm)
  samples = correlated_samples + self.mu
  return samples
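The perm built in Example #3 rotates the trailing sample dimension to the front. A standalone sketch of the same construction on a dummy rank-3 tensor (shapes chosen only for illustration):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

t = tf.zeros([4, 6, 10])                   # [batch dims..., n]
rank = array_ops.rank(t)
perm = array_ops.concat(
    (array_ops.stack([rank - 1]), math_ops.range(0, rank - 1)), 0)  # [2, 0, 1]
rotated = array_ops.transpose(t, perm)     # shape [10, 4, 6]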
Example #4
Source File: lstm2d.py From lambda-packs with MIT License | 6 votes |
def reduce_to_sequence(images, num_filters_out, scope=None):
  """Reduce an image to a sequence by scanning an LSTM vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name

  Returns:
    A (width, num_images, num_filters_out) sequence.
  """
  with variable_scope.variable_scope(scope, "ReduceToSequence", [images]):
    batch_size, height, width, depth = _shape(images)
    transposed = array_ops.transpose(images, [1, 0, 2, 3])
    reshaped = array_ops.reshape(transposed,
                                 [height, batch_size * width, depth])
    reduced = lstm1d.sequence_to_final(reshaped, num_filters_out)
    output = array_ops.reshape(reduced, [batch_size, width, num_filters_out])
    return output
Example #5
Source File: multinomial.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
  if self.n.get_shape().ndims is not None:
    if self.n.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape()).
  logits = array_ops.reshape(self.logits, [-1, k])
  draws = random_ops.multinomial(
      logits=logits, num_samples=n * n_draws, seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          reduction_indices=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #6
Source File: array_grad.py From lambda-packs with MIT License | 6 votes |
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None]
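The stack/transpose/reshape trick in _TileGrad is easiest to see on concrete numbers; the following NumPy sketch (not part of the original file) reproduces the example from the comment above:

import numpy as np

multiples = np.array([2, 3, 4])
input_shape = np.array([20, 30, 40])
split_shape = np.transpose(np.stack([multiples, input_shape])).reshape([-1])
# split_shape is [2, 20, 3, 30, 4, 40]; reducing over axes 0, 2, 4 sums out the tiles.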
Example #7
Source File: lstm2d.py From lambda-packs with MIT License | 6 votes |
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output
Example #8
Source File: image_ops_impl.py From lambda-packs with MIT License | 6 votes |
def transpose_image(image):
  """Transpose an image by swapping the first and second dimension.

  See also `transpose()`.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`

  Returns:
    A 3-D tensor of shape `[width, height, channels]`

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  return array_ops.transpose(image, [1, 0, 2], name='transpose_image')
Example #9
Source File: multinomial.py From lambda-packs with MIT License | 6 votes |
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  if self.total_count.get_shape().ndims is not None:
    if self.total_count.get_shape().ndims != 0:
      raise NotImplementedError(
          "Sample only supported for scalar number of draws.")
  elif self.validate_args:
    is_scalar = check_ops.assert_rank(
        n_draws, 0,
        message="Sample only supported for scalar number of draws.")
    n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
  k = self.event_shape_tensor()[0]
  # Flatten batch dims so logits has shape [B, k],
  # where B = reduce_prod(self.batch_shape_tensor()).
  draws = random_ops.multinomial(
      logits=array_ops.reshape(self.logits, [-1, k]),
      num_samples=n * n_draws,
      seed=seed)
  draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
  x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                          axis=-2)  # shape: [B, n, k]
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  return array_ops.reshape(x, final_shape)
Example #10
Source File: local_cli_wrapper_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def setUp(self):
  self._tmp_dir = tempfile.mktemp()

  self.v = variables.Variable(10.0, name="v")
  self.delta = constant_op.constant(1.0, name="delta")
  self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

  self.ph = array_ops.placeholder(dtypes.float32, name="ph")
  self.xph = array_ops.transpose(self.ph, name="xph")
  self.m = constant_op.constant(
      [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
  self.y = math_ops.matmul(self.m, self.xph, name="y")

  self.sess = session.Session()

  # Initialize variable.
  self.sess.run(self.v.initializer)
Example #11
Source File: dynamic_decoder.py From tensorflow_end2end_speech_recognition with MIT License | 6 votes |
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape(
          [x_static_shape[1].value, x_static_shape[0].value]).concatenate(
              x_static_shape[2:]))
  return x_t
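A usage sketch, assuming the _transpose_batch_time helper above (and the TF 1.x shape API it relies on) is in scope: swapping the first two axes turns a batch-major tensor [batch, time, ...] into the time-major layout [time, batch, ...] that dynamic decoding loops typically expect.

import tensorflow as tf

batch_major = tf.zeros([32, 100, 512])            # [batch, time, features]
time_major = _transpose_batch_time(batch_major)   # shape [100, 32, 512]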
Example #12
Source File: lstm2d.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output
Example #13
Source File: lstm2d.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def sequence_to_images(tensor, num_image_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batches, depth) sequence tensor
    num_image_batches: the number of image batches

  Returns:
    (num_images, height, width, depth) tensor
  """
  width, num_batches, depth = _shape(tensor)
  height = num_batches // num_image_batches
  reshaped = array_ops.reshape(tensor,
                               [width, num_image_batches, height, depth])
  return array_ops.transpose(reshaped, [1, 2, 0, 3])
Example #14
Source File: test_util.py From lambda-packs with MIT License | 6 votes |
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {
      4: [0, 3, 1, 2],
      5: [0, 4, 1, 2, 3]
  }
  if isinstance(input_tensor, ops.Tensor):
    ndims = input_tensor.shape.ndims
    return array_ops.transpose(input_tensor, new_axes[ndims])
  else:
    ndims = len(input_tensor)
    return [input_tensor[a] for a in new_axes[ndims]]
Example #15
Source File: test_util.py From lambda-packs with MIT License | 6 votes |
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {
      4: [0, 2, 3, 1],
      5: [0, 2, 3, 4, 1]
  }
  if isinstance(input_tensor, ops.Tensor):
    ndims = input_tensor.shape.ndims
    return array_ops.transpose(input_tensor, new_axes[ndims])
  else:
    ndims = len(input_tensor)
    return [input_tensor[a] for a in new_axes[ndims]]
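Because the helpers in Examples #14 and #15 also accept a plain shape list, the permutations are easy to check without building a graph. A usage sketch, assuming both helpers are in scope:

nhwc_shape = [8, 32, 32, 3]             # [N, H, W, C]
nchw_shape = NHWCToNCHW(nhwc_shape)     # [8, 3, 32, 32]
roundtrip = NCHWToNHWC(nchw_shape)      # back to [8, 32, 32, 3]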
Example #16
Source File: tfexample_decoder.py From lambda-packs with MIT License | 6 votes |
def tensors_to_item(self, keys_to_tensors):
  """Maps the given dictionary of tensors to a concatenated list of bboxes.

  Args:
    keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

  Returns:
    [num_boxes, 4] tensor of bounding box coordinates, i.e. 1 bounding box per
      row, in order [y_min, x_min, y_max, x_max].
  """
  sides = []
  for key in self._full_keys:
    side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
    sides.append(side)

  bounding_box = array_ops.concat(sides, 0)
  return array_ops.transpose(bounding_box)
Example #17
Source File: clustering_ops.py From lambda-packs with MIT License | 6 votes |
def _compute_euclidean_distance(cls, inputs, clusters):
  """Computes Euclidean distance between each input and each cluster center.

  Args:
    inputs: list of input Tensors.
    clusters: cluster Tensor.

  Returns:
    list of Tensors, where each element corresponds to each element in inputs.
    The value is the distance of each row to all the cluster centers.
  """
  output = []
  for inp in inputs:
    with ops.colocate_with(inp):
      # Computes Euclidean distance. Note the first and third terms are
      # broadcast additions.
      squared_distance = (
          math_ops.reduce_sum(math_ops.square(inp), 1, keep_dims=True) -
          2 * math_ops.matmul(inp, clusters, transpose_b=True) +
          array_ops.transpose(
              math_ops.reduce_sum(
                  math_ops.square(clusters), 1, keep_dims=True)))
      output.append(squared_distance)
  return output
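Example #17 relies on the identity ||x - c||^2 = ||x||^2 - 2 x·c + ||c||^2, with the per-row norms broadcast against the [num_points, num_clusters] matmul term. A NumPy check of that identity (illustration only, not part of the original file):

import numpy as np

inp = np.random.rand(5, 3)         # 5 points in 3 dimensions
clusters = np.random.rand(4, 3)    # 4 cluster centers
squared_distance = (np.sum(inp**2, 1, keepdims=True)           # column of ||x||^2
                    - 2 * inp.dot(clusters.T)                   # cross terms
                    + np.sum(clusters**2, 1, keepdims=True).T)  # row of ||c||^2
direct = ((inp[:, None, :] - clusters[None, :, :])**2).sum(-1)
assert np.allclose(squared_distance, direct)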
Example #18
Source File: gmm_ops.py From lambda-packs with MIT License | 6 votes |
def _define_full_covariance_probs(self, shard_id, shard):
  """Defines the full covariance probabilities per example in a class.

  Updates a matrix with dimension num_examples X num_classes.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  diff = shard - self._means
  cholesky = linalg_ops.cholesky(self._covs + self._min_var)
  log_det_covs = 2.0 * math_ops.reduce_sum(
      math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
  x_mu_cov = math_ops.square(
      linalg_ops.matrix_triangular_solve(
          cholesky, array_ops.transpose(diff, perm=[0, 2, 1]), lower=True))
  diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
  self._probs[shard_id] = -0.5 * (
      diag_m + math_ops.to_float(self._dimensions) * math_ops.log(2 * np.pi) +
      log_det_covs)
Example #19
Source File: gmm_ops.py From lambda-packs with MIT License | 6 votes |
def _define_partial_maximization_operation(self, shard_id, shard):
  """Computes the partial statistics of the means and covariances.

  Args:
    shard_id: current shard id.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  # Soft assignment of each data point to each of the two clusters.
  self._points_in_k[shard_id] = math_ops.reduce_sum(
      self._w[shard_id], 0, keep_dims=True)
  # Partial means.
  w_mul_x = array_ops.expand_dims(
      math_ops.matmul(
          self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
      1)
  self._w_mul_x.append(w_mul_x)
  # Partial covariances.
  x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
  x_trans = array_ops.transpose(x, perm=[0, 2, 1])
  x_mul_w = array_ops.concat([
      array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
      for k in range(self._num_classes)
  ], 0)
  self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
Example #20
Source File: array_grad.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None]
Example #21
Source File: gmm_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _define_partial_maximization_operation(self, shard_id, shard):
  """Computes the partial statistics of the means and covariances.

  Args:
    shard_id: current shard id.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  # Soft assignment of each data point to each of the two clusters.
  self._points_in_k[shard_id] = math_ops.reduce_sum(
      self._w[shard_id], 0, keep_dims=True)
  # Partial means.
  w_mul_x = array_ops.expand_dims(
      math_ops.matmul(
          self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
      1)
  self._w_mul_x.append(w_mul_x)
  # Partial covariances.
  x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
  x_trans = array_ops.transpose(x, perm=[0, 2, 1])
  x_mul_w = array_ops.concat([
      array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
      for k in range(self._num_classes)
  ], 0)
  self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
Example #22
Source File: gmm_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _define_full_covariance_probs(self, shard_id, shard):
  """Defines the full covariance probabilities per example in a class.

  Updates a matrix with dimension num_examples X num_classes.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.
  """
  diff = shard - self._means
  cholesky = linalg_ops.cholesky(self._covs + self._min_var)
  log_det_covs = 2.0 * math_ops.reduce_sum(
      math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
  x_mu_cov = math_ops.square(
      linalg_ops.matrix_triangular_solve(
          cholesky, array_ops.transpose(diff, perm=[0, 2, 1]), lower=True))
  diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
  self._probs[shard_id] = -0.5 * (
      diag_m + math_ops.to_float(self._dimensions) * math_ops.log(2 * np.pi) +
      log_det_covs)
Example #23
Source File: clustering_ops.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def _compute_euclidean_distance(cls, inputs, clusters):
  """Computes Euclidean distance between each input and each cluster center.

  Args:
    inputs: list of input Tensors.
    clusters: cluster Tensor.

  Returns:
    list of Tensors, where each element corresponds to each element in inputs.
    The value is the distance of each row to all the cluster centers.
  """
  output = []
  for inp in inputs:
    with ops.colocate_with(inp):
      # Computes Euclidean distance. Note the first and third terms are
      # broadcast additions.
      squared_distance = (
          math_ops.reduce_sum(math_ops.square(inp), 1, keep_dims=True) -
          2 * math_ops.matmul(inp, clusters, transpose_b=True) +
          array_ops.transpose(
              math_ops.reduce_sum(
                  math_ops.square(clusters), 1, keep_dims=True)))
      output.append(squared_distance)
  return output
Example #24
Source File: tfexample_decoder.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def tensors_to_item(self, keys_to_tensors):
  """Maps the given dictionary of tensors to a concatenated list of bboxes.

  Args:
    keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

  Returns:
    [num_boxes, 4] tensor of bounding box coordinates, i.e. 1 bounding box per
      row, in order [y_min, x_min, y_max, x_max].
  """
  sides = []
  for key in self._full_keys:
    side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
    sides.append(side)

  bounding_box = array_ops.concat(sides, 0)
  return array_ops.transpose(bounding_box)
Example #25
Source File: image_ops_impl.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def transpose_image(image):
  """Transpose an image by swapping the first and second dimension.

  See also `transpose()`.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`

  Returns:
    A 3-D tensor of shape `[width, height, channels]`

  Raises:
    ValueError: if the shape of `image` is not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  return array_ops.transpose(image, [1, 0, 2], name='transpose_image')
Example #26
Source File: pooling.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def call(self, inputs):
  pool_shape = (1,) + self.pool_size + (1,)
  strides = (1,) + self.strides + (1,)

  if self.data_format == 'channels_first':
    # TF does not support channels first with 3D pooling operations,
    # so we must handle this case manually.
    inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))

  outputs = self.pool_function(
      inputs,
      ksize=pool_shape,
      strides=strides,
      padding=self.padding.upper())

  if self.data_format == 'channels_first':
    outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
  return outputs
Example #27
Source File: operator_pd.py From lambda-packs with MIT License | 5 votes |
def _flip_vector_to_matrix_static(vec, batch_shape):
  """flip_vector_to_matrix with static shapes."""
  # Shapes associated with batch_shape
  batch_rank = batch_shape.ndims

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = vec.get_shape()
  vec_rank = len(vec_shape)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = vec_shape[:m]
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [np.prod(vec_shape_left)]
  k = vec_shape[-1]
  new_shape = batch_shape.concatenate(k).concatenate(condensed_shape)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
    return array_ops.transpose(vec, perm=perm)

  if 0 < m:
    x_flipped = _flip_front_dims_to_back()
  else:
    x_flipped = array_ops.expand_dims(vec, -1)

  return array_ops.reshape(x_flipped, new_shape)
Example #28
Source File: special_math_ops.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def _transpose_if_necessary(tensor, perm):
  """Like transpose(), but avoids creating a new tensor if possible."""
  if perm != range(len(perm)):
    return array_ops.transpose(tensor, perm=perm)
  else:
    return tensor
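One caveat worth noting (my reading, not stated in the source): on Python 2 the comparison works because range() returns a list, but on Python 3 a list never equals a range object, so the short-circuit branch never fires; the result is still correct, a transpose op is simply always created. A Python-3-friendly sketch of the same idea, assuming array_ops is imported as in the original file:

def _transpose_if_necessary_py3(tensor, perm):
  """Like transpose(), but avoids creating a new tensor if possible."""
  if list(perm) != list(range(len(perm))):
    return array_ops.transpose(tensor, perm=perm)
  return tensor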
Example #29
Source File: sample_stats.py From lambda-packs with MIT License | 5 votes |
def _move_dims_to_flat_end(x, axis, x_ndims):
  """Move dims corresponding to `axis` in `x` to the end, then flatten.

  Args:
    x: `Tensor` with shape `[B0,B1,...,Bb]`.
    axis: Python list of indices into dimensions of `x`.
    x_ndims: Python integer holding number of dimensions in `x`.

  Returns:
    `Tensor` with value from `x` and dims in `axis` moved to end into one single
      dimension.
  """
  # Suppose x.shape = [a, b, c, d]
  # Suppose axis = [1, 3]

  # front_dims = [0, 2] in example above.
  front_dims = sorted(set(range(x_ndims)).difference(axis))

  # x_permed.shape = [a, c, b, d]
  x_permed = array_ops.transpose(x, perm=front_dims + list(axis))

  if x.get_shape().is_fully_defined():
    x_shape = x.get_shape().as_list()
    # front_shape = [a, c], end_shape = [b * d]
    front_shape = [x_shape[i] for i in front_dims]
    end_shape = [np.prod([x_shape[i] for i in axis])]
    full_shape = front_shape + end_shape
  else:
    front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)]
    end_shape = [-1]
    full_shape = array_ops.concat([front_shape, end_shape], axis=0)
  return array_ops.reshape(x_permed, shape=full_shape)
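A usage sketch for Example #29, assuming the helper and its imports are in scope: with x of shape [a, b, c, d] and axis = [1, 3], the kept dims stay in front and the selected dims are flattened into one trailing dimension.

import tensorflow as tf

x = tf.zeros([2, 3, 4, 5])
flat_end = _move_dims_to_flat_end(x, axis=[1, 3], x_ndims=4)  # shape [2, 4, 15]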
Example #30
Source File: lstm2d.py From lambda-packs with MIT License | 5 votes |
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
  """Reduce an image to a final state by running two LSTMs.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth (defaults to num_filters_out)
    scope: optional scope name

  Returns:
    A (num_images, num_filters_out) batch.
  """
  with variable_scope.variable_scope(scope, "ReduceToFinal", [images]):
    nhidden = nhidden or num_filters_out
    batch_size, height, width, depth = _shape(images)
    transposed = array_ops.transpose(images, [1, 0, 2, 3])
    reshaped = array_ops.reshape(transposed,
                                 [height, batch_size * width, depth])
    with variable_scope.variable_scope("reduce1"):
      reduced = lstm1d.sequence_to_final(reshaped, nhidden)
    transposed_hidden = array_ops.reshape(reduced,
                                          [batch_size, width, nhidden])
    hidden = array_ops.transpose(transposed_hidden, [1, 0, 2])
    with variable_scope.variable_scope("reduce2"):
      output = lstm1d.sequence_to_final(hidden, num_filters_out)
  return output