Python tensorflow.sparse_reorder() Examples

The following are 20 code examples of tensorflow.sparse_reorder(). tf.sparse_reorder() returns a copy of a SparseTensor with its indices sorted into canonical, row-major order (the values are permuted to match); many sparse ops assume this ordering, so it is typically applied right after a SparseTensor is built from unordered indices. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
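Before the project examples, here is a minimal self-contained sketch of the op itself (not taken from any project below; it assumes TensorFlow 1.x, where the symbol is still tf.sparse_reorder, plus NumPy). It builds a SparseTensor whose indices are deliberately out of order and lets tf.sparse_reorder restore the canonical ordering:

import numpy as np
import tensorflow as tf

# Indices deliberately given out of row-major order.
indices = np.array([[2, 1], [0, 3], [1, 0]], dtype=np.int64)
values = np.array([30.0, 10.0, 20.0], dtype=np.float32)
dense_shape = np.array([3, 4], dtype=np.int64)

sp_input = tf.SparseTensor(indices, values, dense_shape)
sp_output = tf.sparse_reorder(sp_input)  # sorts indices; values are permuted to match

with tf.Session() as sess:
    result = sess.run(sp_output)
    print(result.indices)  # [[0 3] [1 0] [2 1]]
    print(result.values)   # [10. 20. 30.]

In TensorFlow 2.x the same operation is available as tf.sparse.reorder.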
Example #1
Source File: example_static.py    From rgat with Apache License 2.0    6 votes
def get_architecture():
    inputs_ph = tf.placeholder(
        dtype=tf.float32, shape=[None, FLAGS.features_dim], name="features_")
    support_ph = tf.sparse_placeholder(
        dtype=tf.float32, shape=[None, None], name="support_")

    tf.logging.info("Reordering indices of support - this is extremely "
                    "important as sparse operations assume sparse indices have "
                    "been ordered.")
    support_reorder = tf.sparse_reorder(support_ph)

    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)

    outputs = rgat_layer(inputs=inputs_ph, support=support_reorder)

    return inputs_ph, support_ph, outputs 
Example #2
Source File: MaSIF_ppi_search.py    From masif with Apache License 2.0    5 votes
def build_sparse_matrix_softmax(self, idx_non_zero_values, X, dense_shape_A):
        A = tf.SparseTensorValue(idx_non_zero_values, tf.squeeze(X), dense_shape_A)
        A = tf.sparse_reorder(A)  # n_edges x n_edges
        A = tf.sparse_softmax(A)

        return A 
Example #3
Source File: tfutils.py    From pycodesuggest with MIT License    5 votes
def sparse_transpose(sp_input):
    transposed_indices = tf.reverse(tf.cast(sp_input.indices, tf.int32), [False, True])
    transposed_values = sp_input.values
    transposed_shape = tf.reverse(tf.cast(sp_input.shape, tf.int32), [True])
    sp_output = tf.SparseTensor(tf.cast(transposed_indices, tf.int64), transposed_values, tf.cast(transposed_shape, tf.int64))
    sp_output = tf.sparse_reorder(sp_output)
    return sp_output 
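The helper above targets a pre-1.0 TensorFlow API (boolean dims for tf.reverse, sp_input.shape). A rough equivalent against the TensorFlow 1.x API, written here only to illustrate why the final tf.sparse_reorder is needed, might look like the following sketch (this is not the project's code):

import tensorflow as tf

def sparse_transpose_tf1(sp_input):
    # Swapping the two index columns produces a valid transpose, but it
    # generally breaks row-major ordering, so we reorder before returning.
    transposed_indices = tf.reverse(sp_input.indices, axis=[1])
    transposed_shape = tf.reverse(sp_input.dense_shape, axis=[0])
    sp_output = tf.SparseTensor(transposed_indices, sp_input.values, transposed_shape)
    return tf.sparse_reorder(sp_output)

TensorFlow 1.x also provides tf.sparse_transpose, which performs the same index swap and reordering internally.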
Example #4
Source File: model_mcb.py    From FVTA_MemexQA with MIT License    5 votes
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
	"""
	Return a sparse matrix used for the tensor sketch operation in compact
	bilinear pooling.

	Args:
		rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
		rand_s: a 1D numpy array of 1s and -1s with the same shape as `rand_h`.
		output_dim: the output dimension of compact bilinear pooling.

	Returns:
		a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
	"""

	# Generate a sparse matrix for tensor count sketch
	rand_h = rand_h.astype(np.int64)
	rand_s = rand_s.astype(np.float32)
	assert(rand_h.ndim==1 and rand_s.ndim==1 and len(rand_h)==len(rand_s))
	assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))

	input_dim = len(rand_h)
	indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
							  rand_h[..., np.newaxis]), axis=1)
	sparse_sketch_matrix = tf.sparse_reorder(
		tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
	return sparse_sketch_matrix 
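A hedged usage sketch for the helper above (the shapes, names, and values are illustrative only, assuming TensorFlow 1.x and NumPy): each column of x is projected to its output_dim-dimensional count sketch by multiplying with the already-reordered sparse matrix.

import numpy as np
import tensorflow as tf

input_dim, output_dim = 4, 8                              # illustrative sizes
rand_h = np.random.randint(output_dim, size=input_dim)    # hash bucket for each input dimension
rand_s = 2 * np.random.randint(2, size=input_dim) - 1     # random signs in {-1, +1}

sketch_matrix = _generate_sketch_matrix(rand_h, rand_s, output_dim)  # input_dim x output_dim

x = tf.constant(np.random.rand(input_dim, 3), dtype=tf.float32)      # 3 column vectors to sketch
# adjoint_a=True multiplies by the transpose of the sketch matrix: output_dim x 3.
x_sketch = tf.sparse_tensor_dense_matmul(sketch_matrix, x, adjoint_a=True)

with tf.Session() as sess:
    print(sess.run(x_sketch).shape)  # (8, 3)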
Example #5
Source File: models_siamese.py    From gcn_metric_learning with MIT License    5 votes
def chebyshev5(self, x, L, Fout, K, regularization=False):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat(0, [x, x_])  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=regularization)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout 
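graph.rescale_L is project code (cnn_graph-style graph utilities), used here and in several examples below. As a hedged sketch of what it is assumed to do, it maps the Laplacian's spectrum from [0, lmax] into [-1, 1], which the Chebyshev recurrence above requires; one common implementation looks roughly like this:

import scipy.sparse

def rescale_L(L, lmax=2):
    # Sketch of the assumed behavior of graph.rescale_L:
    # rescale a (normalized) graph Laplacian so its eigenvalues lie in [-1, 1].
    M, _ = L.shape
    I = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
    return (2.0 / lmax) * L - I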
Example #6
Source File: models_siamese.py    From gcn_metric_learning with MIT License    5 votes
def chebyshev5(self, x, L, Fout, K):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat(0, [x, x_])  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout 
Example #7
Source File: dcrnn_cell.py    From DCRNN with MIT License    5 votes
def _build_sparse_matrix(L):
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        return tf.sparse_reorder(L) 
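A small usage sketch for the helper above (treating it as a standalone function; the matrix values are made up, assuming SciPy and TensorFlow 1.x). Building a COO matrix directly from unordered (row, col, value) triplets is exactly the case where the trailing tf.sparse_reorder matters:

import numpy as np
import scipy.sparse
import tensorflow as tf

# Made-up triplets, intentionally not in row-major order.
row = np.array([2, 0, 1], dtype=np.int64)
col = np.array([0, 1, 2], dtype=np.int64)
data = np.array([3.0, 1.0, 2.0])
L = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3))

L_sp = _build_sparse_matrix(L)  # tf.SparseTensor with canonically ordered indices

with tf.Session() as sess:
    print(sess.run(L_sp).indices)  # [[0 1] [1 2] [2 0]]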
Example #8
Source File: MaSIF_site.py    From masif with Apache License 2.0    5 votes
def build_sparse_matrix_softmax(self, idx_non_zero_values, X, dense_shape_A):
        A = tf.SparseTensorValue(idx_non_zero_values, tf.squeeze(X), dense_shape_A)
        A = tf.sparse_reorder(A)  # n_edges x n_edges
        A = tf.sparse_softmax(A)

        return A 
Example #9
Source File: core.py    From Deep-Learning-with-TensorFlow-Second-Edition with MIT License    5 votes
def init_placeholders(self):
        if self.input_type == 'dense':
            self.train_x = tf.placeholder(tf.float32, shape=[None, self.n_features], name='x')
        else:
            with tf.name_scope('sparse_placeholders') as scope:
                self.raw_indices = tf.placeholder(tf.int64, shape=[None, 2], name='raw_indices')
                self.raw_values = tf.placeholder(tf.float32, shape=[None], name='raw_data')
                self.raw_shape = tf.placeholder(tf.int64, shape=[2], name='raw_shape')
            # tf.sparse_reorder is not needed since scipy returns COO indices in canonical (row-major) order
            self.train_x = tf.SparseTensor(self.raw_indices, self.raw_values, self.raw_shape)
        self.train_y = tf.placeholder(tf.float32, shape=[None], name='Y') 
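A hedged, self-contained sketch of how the feed values for the sparse branch above are typically built (the array contents are made up, assuming SciPy and NumPy). It illustrates the in-code comment: converting a canonical CSR matrix with .tocoo() already yields row-major ordered triplets, so no tf.sparse_reorder is required before constructing the SparseTensor:

import numpy as np
import scipy.sparse

# Made-up feature batch in CSR form.
X = scipy.sparse.csr_matrix(
    np.array([[0., 2., 0.],
              [1., 0., 3.]], dtype=np.float32))
X_coo = X.tocoo()  # CSR -> COO keeps row-major, column-sorted ordering

raw_indices = np.column_stack((X_coo.row, X_coo.col)).astype(np.int64)
raw_values = X_coo.data
raw_shape = np.array(X_coo.shape, dtype=np.int64)

print(raw_indices)  # [[0 1] [1 0] [1 2]] -- already in canonical order

These three arrays are what would be fed into the raw_indices, raw_values, and raw_shape placeholders defined above.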
Example #10
Source File: MaSIF_ligand.py    From masif with Apache License 2.0    5 votes
def build_sparse_matrix_softmax(self, idx_non_zero_values, X, dense_shape_A):
        A = tf.SparseTensorValue(idx_non_zero_values, tf.squeeze(X), dense_shape_A)
        A = tf.sparse_reorder(A)  # n_edges x n_edges
        A = tf.sparse_softmax(A)

        return A 
Example #11
Source File: sparse_reorder_op_test.py    From deep_image_model with Apache License 2.0    5 votes
def testGradients(self):
    with self.test_session(use_gpu=False):
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_input = tf.SparseTensor(
            input_val.indices, input_val.values, input_val.shape)
        sp_output = tf.sparse_reorder(sp_input)

        err = tf.test.compute_gradient_error(
            sp_input.values,
            input_val.values.shape,
            sp_output.values,
            input_val.values.shape,
            x_init_value=input_val.values)
        self.assertLess(err, 1e-11) 
Example #12
Source File: sparse_reorder_op_test.py    From deep_image_model with Apache License 2.0    5 votes
def testFeedOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
      for _ in range(5):  # To test various random permutations
        sp_input = self._SparseTensorPlaceholder()
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_output = tf.sparse_reorder(sp_input)

        output_val = sess.run(sp_output, {sp_input: input_val})
        self.assertAllEqual(output_val.indices, expected_output_val.indices)
        self.assertAllEqual(output_val.values, expected_output_val.values)
        self.assertAllEqual(output_val.shape, expected_output_val.shape) 
Example #13
Source File: sparse_reorder_op_test.py    From deep_image_model with Apache License 2.0    5 votes
def testOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_output = tf.sparse_reorder(input_val)

        output_val = sess.run(sp_output)
        self.assertAllEqual(output_val.indices, expected_output_val.indices)
        self.assertAllEqual(output_val.values, expected_output_val.values)
        self.assertAllEqual(output_val.shape, expected_output_val.shape) 
Example #14
Source File: sparse_reorder_op_test.py    From deep_image_model with Apache License 2.0    5 votes
def testFeedAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = tf.sparse_reorder(sp_input)

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.shape, input_val.shape) 
Example #15
Source File: sparse_reorder_op_test.py    From deep_image_model with Apache License 2.0    5 votes
def testAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = tf.sparse_reorder(input_val)

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.shape, input_val.shape) 
Example #16
Source File: compact_bilinear_pooling.py    From tensorflow-litterbox with Apache License 2.0    5 votes
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for the tensor sketch operation in compact
    bilinear pooling.

    Args:
        rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
        rand_s: a 1D numpy array of 1s and -1s with the same shape as `rand_h`.
        output_dim: the output dimension of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert(rand_h.ndim==1 and rand_s.ndim==1 and len(rand_h)==len(rand_s))
    assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix 
Example #17
Source File: compact_bilinear_pooling.py    From RGB-N with MIT License    5 votes
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for the tensor sketch operation in compact
    bilinear pooling.

    Args:
        rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
        rand_s: a 1D numpy array of 1s and -1s with the same shape as `rand_h`.
        output_dim: the output dimension of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert(rand_h.ndim==1 and rand_s.ndim==1 and len(rand_h)==len(rand_s))
    assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix 
Example #18
Source File: graph_cnn.py    From TextCategorization with MIT License    4 votes
def graph_conv_chebyshev(self, x, L, K, F_out):
        """
        Graph convolutional operation.
        """
        # K = Chebyshev polynomial order & support size
        # F_out = No. of output features (per vertex)
        # B = Batch size
        # V = No. of vertices
        # F_in = No. of input features (per vertex)
        B, V, F_in = x.get_shape()
        B, V, F_in = int(B), int(V), int(F_in)

        # Rescale Laplacian and store as a TF sparse tensor (copy to not modify the shared L)
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        L = tf.cast(L, tf.float32)

        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])     # V x F_in x B
        x0 = tf.reshape(x0, [V, F_in * B])       # V x F_in*B
        x = tf.expand_dims(x0, 0)                # 1 x V x F_in*B

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)           # 1 x V x F_in*B
            return tf.concat([x, x_], axis=0)    # K x V x F_in*B
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # V x F_in*B
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, V, F_in, B])       # K x V x F_in x B
        x = tf.transpose(x, perm=[3, 1, 2, 0])   # B x V x F_in x K
        x = tf.reshape(x, [B * V, F_in * K])     # B*V x F_in*K

        # Compose linearly F_in features to get F_out features
        W = tf.Variable(tf.truncated_normal([F_in * K, F_out], stddev=0.1), name="W")
        x = tf.matmul(x, W)                      # B*V x F_out
        x = tf.reshape(x, [B, V, F_out])         # B x V x F_out

        return x 
Example #19
Source File: example_eager.py    From rgat with Apache License 2.0    4 votes
def main(unused_argv):
    tf.logging.info("{} Flags {}".format('*'*15, '*'*15))
    for k, v in FLAGS.flag_values_dict().items():
        tf.logging.info("FLAG `{}`: {}".format(k, v))
    tf.logging.info('*' * (2 * 15 + len(' Flags ')))

    np.random.seed(FLAGS.seed)
    tf.set_random_seed(FLAGS.seed)

    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)

    features, supports = get_batch_of_features_supports_values()

    # Route 1: Run RGAT on each element in the batch separately and combine the
    # results
    individual_supports = [
        graph_utils.relational_supports_to_support(d) for d in supports]
    individual_supports = [
        graph_utils.triple_from_coo(s) for s in individual_supports]
    individual_supports = [
        tf.SparseTensor(i, v, ds) for i, v, ds in individual_supports]
    individual_supports = [
        tf.sparse_reorder(s) for s in individual_supports]

    individual_results = [
        rgat_layer(inputs=f, support=s)
        for f, s in zip(features, individual_supports)]
    individual_results = tf.concat(individual_results, axis=0)

    # Route 2: First combine the batch into a single graph and pass everything
    # through in one go
    combined_features = tf.concat(features, axis=0)

    combined_supports = graph_utils.batch_of_relational_supports_to_support(
        supports)
    combined_supports = graph_utils.triple_from_coo(combined_supports)
    combined_supports = tf.SparseTensor(*combined_supports)
    combined_supports = tf.sparse_reorder(combined_supports)

    combined_results = rgat_layer(
        inputs=combined_features, support=combined_supports)

    if np.allclose(combined_results, individual_results):
        tf.logging.info("The approaches match!")
    else:
        raise ValueError(
            "Doing each element in a batch independently does not produce the "
            "same results as doing all the batch in one go. Something has "
            "clearly broken. Please contact the author ASAP :).") 
Example #20
Source File: model.py    From gconvRNN with MIT License    4 votes
def cheby_conv(x, L, lmax, feat_out, K, W):
    '''
    x : [batch_size, N_node, feat_in] - input of each time step
    nSample : number of samples = batch_size
    nNode : number of node in graph
    feat_in : number of input feature
    feat_out : number of output feature
    L : laplacian
    lmax : largest eigenvalue of L (used to rescale the Laplacian)
    K : kernel size (number of Chebyshev coefficients)
    W : cheby_conv weight [K * feat_in, feat_out]
    '''
    nSample, nNode, feat_in = x.get_shape()
    nSample, nNode, feat_in = int(nSample), int(nNode), int(feat_in) 
    L = graph.rescale_L(L, lmax)  # rescale the Laplacian so its spectrum lies in [-1, 1]
    L = L.tocoo() 
    
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    
    x0 = tf.transpose(x, perm=[1, 2, 0]) #change it to [nNode, feat_in, nSample]
    x0 = tf.reshape(x0, [nNode, feat_in*nSample])
    x = tf.expand_dims(x0, 0) # make it [1, nNode, feat_in*nSample]
    
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)
        return tf.concat([x, x_], axis=0)
    
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
        
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0
        x = concat(x, x2)
        x0, x1 = x1, x2
        
    x = tf.reshape(x, [K, nNode, feat_in, nSample])
    x = tf.transpose(x, perm=[3,1,2,0])
    x = tf.reshape(x, [nSample*nNode, feat_in*K])
    
    x = tf.matmul(x, W)  # linear combination of the K Chebyshev terms; no bias term
    out = tf.reshape(x, [nSample, nNode, feat_out])
    return out
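A hedged call sketch for cheby_conv (every shape and the toy Laplacian are made up; it assumes TensorFlow 1.x, SciPy, and the project's graph.rescale_L helper, which is taken to rescale the Laplacian's spectrum into [-1, 1]):

import numpy as np
import scipy.sparse
import tensorflow as tf

batch_size, n_node, feat_in, feat_out, K = 8, 3, 5, 16, 3  # illustrative sizes

# Toy normalized Laplacian for a 3-node path graph (float32 to match the features).
L = scipy.sparse.csr_matrix(np.array([[1., -0.5, 0.],
                                      [-0.5, 1., -0.5],
                                      [0., -0.5, 1.]], dtype=np.float32))

x = tf.placeholder(tf.float32, [batch_size, n_node, feat_in])
W = tf.get_variable("cheby_W", shape=[K * feat_in, feat_out])

out = cheby_conv(x, L, lmax=2, feat_out=feat_out, K=K, W=W)  # [batch_size, n_node, feat_out]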
