Python tensorflow.sparse_reshape() Examples
The following are 24 code examples of tensorflow.sparse_reshape(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
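Before the examples, here is a minimal, self-contained sketch of what tf.sparse_reshape() does. It assumes TensorFlow 1.x, where the op lives at tf.sparse_reshape (in 2.x it moved to tf.sparse.reshape); the tensor contents are illustrative only:

import tensorflow as tf

# A 2x3 sparse tensor with two non-zero entries.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]],
                     values=[1.0, 2.0],
                     dense_shape=[2, 3])

# Reshape to 3x2; one dimension may also be given as -1 and inferred.
sp_reshaped = tf.sparse_reshape(sp, [3, 2])

with tf.Session() as sess:
    out = sess.run(sp_reshaped)
    print(out.indices)      # [[0 0] [2 1]] -- row-major order is preserved
    print(out.dense_shape)  # [3 2]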
Example #1
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 6 votes |
def testFeedPartialShapes(self):
  with self.test_session(use_gpu=False):
    # Incorporate new rank into shape information if known
    sp_input = self._SparseTensorPlaceholder()
    sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])
    self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
    self.assertListEqual(sp_output.shape.get_shape().as_list(), [3])

    # Incorporate known shape information about input indices in output
    # indices
    sp_input = self._SparseTensorPlaceholder()
    sp_input.indices.set_shape([5, None])
    sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])
    self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
    self.assertListEqual(sp_output.shape.get_shape().as_list(), [3])

    # Even if new_shape has no shape information, we know the ranks of
    # output indices and shape
    sp_input = self._SparseTensorPlaceholder()
    sp_input.indices.set_shape([5, None])
    new_shape = tf.placeholder(tf.int64)
    sp_output = tf.sparse_reshape(sp_input, new_shape)
    self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
    self.assertListEqual(sp_output.shape.get_shape().as_list(), [None])
Example #2
Source File: tf_sequence_example_decoder.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def tensors_to_item(self, keys_to_tensors):
  """Maps the given dictionary of tensors to a concatenated list of bboxes.

  Args:
    keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

  Returns:
    [time, num_boxes, 4] tensor of bounding box coordinates, in order
    [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
    or a dense Tensor is determined by the return_dense parameter. Empty
    positions in the sparse tensor are filled with -1.0 values.
  """
  sides = []
  for key in self._full_keys:
    value = keys_to_tensors[key]
    expanded_dims = tf.concat(
        [tf.to_int64(tf.shape(value)),
         tf.constant([1], dtype=tf.int64)], 0)
    side = tf.sparse_reshape(value, expanded_dims)
    sides.append(side)
  bounding_boxes = tf.sparse_concat(2, sides)
  if self._return_dense:
    bounding_boxes = tf.sparse_tensor_to_dense(
        bounding_boxes, default_value=self._default_value)
  return bounding_boxes
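The reshape in this decoder only appends a trailing dimension of size 1 so that the rank-2 sides can be concatenated along a new last axis. A sketch of just that trick in isolation, with hypothetical tensor contents (assuming TensorFlow 1.x):

import tensorflow as tf

# A rank-2 sparse "side" of shape [time, num_boxes].
side = tf.SparseTensor(indices=[[0, 0], [1, 1]],
                       values=[0.1, 0.9],
                       dense_shape=[2, 3])

# Append a unit dimension: [time, num_boxes] -> [time, num_boxes, 1].
expanded_dims = tf.concat(
    [side.dense_shape, tf.constant([1], dtype=tf.int64)], 0)
side_3d = tf.sparse_reshape(side, expanded_dims)

# Two such sides can then be joined along the new axis,
# giving shape [time, num_boxes, 2].
stacked = tf.sparse_concat(2, [side_3d, side_3d])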
Example #3
Source File: tf_sequence_example_decoder.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def tensors_to_item(self, keys_to_tensors):
  """Maps the given dictionary of tensors to a concatenated list of bboxes.

  Args:
    keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

  Returns:
    [time, num_boxes, 4] tensor of bounding box coordinates, in order
    [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
    or a dense Tensor is determined by the return_dense parameter. Empty
    positions in the sparse tensor are filled with -1.0 values.
  """
  sides = []
  for key in self._full_keys:
    value = keys_to_tensors[key]
    expanded_dims = tf.concat(
        [tf.to_int64(tf.shape(value)),
         tf.constant([1], dtype=tf.int64)], 0)
    side = tf.sparse_reshape(value, expanded_dims)
    sides.append(side)
  bounding_boxes = tf.sparse_concat(2, sides)
  if self._return_dense:
    bounding_boxes = tf.sparse_tensor_to_dense(
        bounding_boxes, default_value=self._default_value)
  return bounding_boxes
Example #4
Source File: model_utils.py From gcnn-survey-paper with Apache License 2.0 | 6 votes |
def get_sp_topk(adj_pred, sp_adj_train, nb_nodes, k):
  """Returns binary matrix with topK."""
  _, indices = tf.nn.top_k(tf.reshape(adj_pred, (-1,)), k)
  indices = tf.reshape(tf.cast(indices, tf.int64), (-1, 1))
  sp_adj_pred = tf.SparseTensor(
      indices=indices,
      values=tf.ones(k),
      dense_shape=(nb_nodes * nb_nodes,))
  sp_adj_pred = tf.sparse_reshape(sp_adj_pred,
                                  shape=(nb_nodes, nb_nodes, 1))
  sp_adj_train = tf.SparseTensor(
      indices=sp_adj_train.indices,
      values=tf.ones_like(sp_adj_train.values),
      dense_shape=sp_adj_train.dense_shape)
  sp_adj_train = tf.sparse_reshape(sp_adj_train,
                                   shape=(nb_nodes, nb_nodes, 1))
  sp_adj_pred = tf.sparse_concat(
      sp_inputs=[sp_adj_pred, sp_adj_train], axis=-1)
  return tf.sparse_reduce_max(sp_adj_pred, axis=-1)
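A hypothetical call of get_sp_topk (assuming the function above is in scope) on a toy 3-node graph, keeping the top k=2 predicted edges; the score matrix and training adjacency are made up for illustration:

import tensorflow as tf

nb_nodes = 3
adj_pred = tf.constant([[0.9, 0.1, 0.4],
                        [0.2, 0.8, 0.3],
                        [0.5, 0.6, 0.7]])
sp_adj_train = tf.SparseTensor(indices=[[0, 1], [2, 0]],
                               values=[1.0, 1.0],
                               dense_shape=[nb_nodes, nb_nodes])

# Dense [3, 3] matrix that is 1 wherever an edge is among the top-2
# predictions or present in the training adjacency.
binary_adj = get_sp_topk(adj_pred, sp_adj_train, nb_nodes, k=2)

with tf.Session() as sess:
    print(sess.run(binary_adj))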
Example #5
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedNewShapeSameRank(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [3, 10])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 0], [0, 6], [0, 9],
                                  [1, 0], [2, 0], [2, 1]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [3, 10])
Example #6
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testSameShape(self):
  with self.test_session(use_gpu=False) as sess:
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(input_val, [5, 6])

    output_val = sess.run(sp_output)
    self.assertAllEqual(output_val.indices, input_val.indices)
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, input_val.shape)
Example #7
Source File: relational_graph_attention.py From rgat with Apache License 2.0 | 5 votes |
def _attn_r_n_m_h(self):
    h, r, n = self.heads, self.relations, self._nodes

    attn_h_n_rm = self._attn_h_n_rm
    attn_h_n_r_m = tf.sparse_reshape(attn_h_n_rm, [h, n, r, n])
    attn_r_n_m_h = tf.sparse_transpose(attn_h_n_r_m, [2, 1, 3, 0])

    return attn_r_n_m_h
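The same reshape-then-transpose pattern on a toy sparse tensor, as a rough sketch with arbitrarily chosen dimensions (assuming TensorFlow 1.x):

import tensorflow as tf

# A sparse [h, n, r*m] tensor with h = n = 2 and r = m = 2.
attn = tf.SparseTensor(indices=[[0, 0, 3], [1, 1, 0]],
                       values=[1.0, 2.0],
                       dense_shape=[2, 2, 4])

attn = tf.sparse_reshape(attn, [2, 2, 2, 2])    # split trailing dim: [h, n, r, m]
attn = tf.sparse_transpose(attn, [2, 1, 3, 0])  # reorder axes to [r, n, m, h]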
Example #8
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedSameShape(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [5, 6])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices, input_val.indices)
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, input_val.shape)
Example #9
Source File: preprocessors.py From mead-baseline with Apache License 2.0 | 5 votes |
def create_word_vectors_from_post(self, raw_post, mxlen):
    # vocab has only lowercase words
    word2index = self.index
    if self.do_lowercase:
        raw_post = self.lowercase(raw_post)
    word_tokens = tf.string_split(tf.reshape(raw_post, [-1]))
    word_indices = word2index.lookup(word_tokens)
    # Reshape them out to the proper length
    reshaped_words = tf.sparse_reshape(word_indices, shape=[-1])
    return self.reshape_indices(reshaped_words, [mxlen])
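The key step above is flattening the rank-2 output of tf.string_split down to rank 1 with an inferred -1 dimension. That step in isolation (assuming TensorFlow 1.x):

import tensorflow as tf

# tf.string_split returns a [num_strings, max_tokens] SparseTensor;
# sparse_reshape with shape=[-1] flattens it, inferring the length.
tokens = tf.string_split(tf.constant(["the quick brown fox"]))
flat = tf.sparse_reshape(tokens, shape=[-1])

with tf.Session() as sess:
    out = sess.run(flat)
    print(out.dense_shape)  # [4]
    print(out.values)       # [b'the' b'quick' b'brown' b'fox']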
Example #10
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedDenseReshapeSemantics(self):
  with self.test_session(use_gpu=False) as sess:
    # Compute a random rank-5 initial shape and new shape, randomly sparsify
    # it, and check that the output of SparseReshape has the same semantics
    # as a dense reshape.
    factors = np.array([2] * 4 + [3] * 4 + [5] * 4)  # 810k total elements

    orig_rank = np.random.randint(2, 7)
    orig_map = np.random.randint(orig_rank, size=factors.shape)
    orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]

    new_rank = np.random.randint(2, 7)
    new_map = np.random.randint(new_rank, size=factors.shape)
    new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]

    orig_dense = np.random.uniform(size=orig_shape)
    orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
    orig_values = orig_dense[orig_dense < 0.5]

    new_dense = np.reshape(orig_dense, new_shape)
    new_indices = np.transpose(np.nonzero(new_dense < 0.5))
    new_values = new_dense[new_dense < 0.5]

    sp_input = self._SparseTensorPlaceholder()
    input_val = tf.SparseTensorValue(orig_indices, orig_values, orig_shape)
    sp_output = tf.sparse_reshape(sp_input, new_shape)

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices, new_indices)
    self.assertAllEqual(output_val.values, new_values)
    self.assertAllEqual(output_val.shape, new_shape)
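The property this test checks, that sparse_reshape follows the same row-major semantics as a dense reshape, can be verified directly on a small tensor. A minimal sketch, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 1], [1, 2]], values=[3.0, 4.0],
                     dense_shape=[2, 3])

dense_then_reshape = tf.reshape(tf.sparse_tensor_to_dense(sp), [3, 2])
reshape_then_dense = tf.sparse_tensor_to_dense(tf.sparse_reshape(sp, [3, 2]))

with tf.Session() as sess:
    a, b = sess.run([dense_then_reshape, reshape_then_dense])
    assert np.array_equal(a, b)  # both orders of operations agree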
Example #11
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedMismatchedSizesWithInferredDim(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [4, -1])
    with self.assertRaisesOpError("requested shape requires a multiple"):
      sess.run(sp_output, {sp_input: input_val})
Example #12
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedMultipleInferredDims(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [4, -1, -1])
    with self.assertRaisesOpError("only one output shape size may be -1"):
      sess.run(sp_output, {sp_input: input_val})
Example #13
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedDownRankWithInferredDim(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_2x3x4()
    sp_output = tf.sparse_reshape(sp_input, [6, -1])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 1], [1, 0], [1, 2], [3, 3],
                                  [4, 1], [4, 3], [5, 2]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [6, 4])
Example #14
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedDownRank(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_2x3x4()
    sp_output = tf.sparse_reshape(sp_input, [6, 4])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 1], [1, 0], [1, 2], [3, 3],
                                  [4, 1], [4, 3], [5, 2]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [6, 4])
Example #15
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedUpRankWithInferredDim(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [2, -1, 5])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4],
                                  [0, 2, 0], [1, 1, 0], [1, 1, 1]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [2, 3, 5])
Example #16
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedUpRank(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [2, 3, 5])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4],
                                  [0, 2, 0], [1, 1, 0], [1, 1, 1]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [2, 3, 5])
Example #17
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedNewShapeSameRankWithInferredDim(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [3, -1])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices,
                        np.array([[0, 0], [0, 6], [0, 9],
                                  [1, 0], [2, 0], [2, 1]]))
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, [3, 10])
Example #18
Source File: sparse_reshape_op_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testFeedSameShapeWithInferredDim(self):
  with self.test_session(use_gpu=False) as sess:
    sp_input = self._SparseTensorPlaceholder()
    input_val = self._SparseTensorValue_5x6()
    sp_output = tf.sparse_reshape(sp_input, [-1, 6])

    output_val = sess.run(sp_output, {sp_input: input_val})
    self.assertAllEqual(output_val.indices, input_val.indices)
    self.assertAllEqual(output_val.values, input_val.values)
    self.assertAllEqual(output_val.shape, input_val.shape)
Example #19
Source File: scGAN.py From scGAN with MIT License | 4 votes |
def make_input_fn(self, file_paths, epochs=None):
    """
    Function that loads the TFRecords files and creates the placeholders
    for the data inputs.

    Parameters
    ----------
    file_paths : list
        List of TFRecord files to read from.
    epochs : int
        Integer specifying the number of times to read through the dataset.
        If None, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized,
        so call tf.local_variables_initializer() and run the op in a
        session. Default is None.

    Returns
    -------
    features : Tensor
        Tensor containing a batch of cells (vector of expression levels).
    """
    feature_map = {'scg': tf.SparseFeature(index_key='indices',
                                           value_key='values',
                                           dtype=tf.float32,
                                           size=self.genes_no)}

    options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)

    batched_features = tf.contrib.learn.read_batch_features(
        file_pattern=file_paths,
        batch_size=self.batch_size,
        features=feature_map,
        reader=lambda: tf.TFRecordReader(options=options),
        num_epochs=epochs)

    sgc = batched_features['scg']
    sparse = tf.sparse_reshape(sgc, (self.batch_size, self.genes_no))
    dense = tf.sparse_tensor_to_dense(sparse)
    features = tf.reshape(dense, (self.batch_size, self.genes_no))

    return features
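The sparse_reshape here mainly pins the batched SparseFeature, whose dense_shape is only partially known at graph-build time, to a static (batch_size, genes_no) shape before densifying. The core of that step in isolation, with made-up sizes (assuming TensorFlow 1.x):

import tensorflow as tf

batch_size, genes_no = 2, 4
sgc = tf.SparseTensor(indices=[[0, 1], [1, 3]], values=[5.0, 7.0],
                      dense_shape=[batch_size, genes_no])

sparse = tf.sparse_reshape(sgc, (batch_size, genes_no))
dense = tf.sparse_tensor_to_dense(sparse)

with tf.Session() as sess:
    print(sess.run(dense))  # [[0. 5. 0. 0.]
                            #  [0. 0. 0. 7.]]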
Example #20
Source File: layers.py From hetsann with Apache License 2.0 | 4 votes |
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes,
                 in_drop=0.0, coef_drop=0.0, residual=False):
    with tf.name_scope('sp_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)

        seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)

        # simplest self-attention possible
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)

        f_1 = tf.reshape(f_1, (nb_nodes, 1))
        f_2 = tf.reshape(f_2, (nb_nodes, 1))

        f_1 = adj_mat * f_1
        f_2 = adj_mat * tf.transpose(f_2, [1, 0])

        logits = tf.sparse_add(f_1, f_2)
        lrelu = tf.SparseTensor(indices=logits.indices,
                                values=tf.nn.leaky_relu(logits.values),
                                dense_shape=logits.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        if coef_drop != 0.0:
            coefs = tf.SparseTensor(indices=coefs.indices,
                                    values=tf.nn.dropout(coefs.values,
                                                         1.0 - coef_drop),
                                    dense_shape=coefs.dense_shape)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        # As tf.sparse_tensor_dense_matmul expects its arguments to have
        # rank-2, here we make an assumption that our input is of batch
        # size 1, and reshape appropriately.
        # The method will fail in all other cases!
        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])

        seq_fts = tf.squeeze(seq_fts)
        vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
        vals = tf.expand_dims(vals, axis=0)
        vals.set_shape([1, nb_nodes, out_sz])
        ret = tf.contrib.layers.bias_add(vals)

        # residual connection
        if residual:
            if seq.shape[-1] != ret.shape[-1]:
                ret = ret + conv1d(seq, ret.shape[-1], 1)  # activation
            else:
                ret = ret + seq

        return activation(ret)  # activation
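As the comment in the function stresses, the sparse path only works for batch size 1. A hypothetical call on a toy 3-node graph, assuming the function above is in scope; the shapes and edge list are invented for illustration:

import tensorflow as tf

nb_nodes, feat_in, out_sz = 3, 5, 8
seq = tf.placeholder(tf.float32, shape=[1, nb_nodes, feat_in])
adj = tf.SparseTensor(indices=[[0, 1], [1, 2], [2, 0]],
                      values=[1.0, 1.0, 1.0],
                      dense_shape=[nb_nodes, nb_nodes])

# Builds the attention-head graph; output has shape [1, nb_nodes, out_sz].
out = sp_attn_head(seq, out_sz=out_sz, adj_mat=adj,
                   activation=tf.nn.elu, nb_nodes=nb_nodes)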
Example #21
Source File: input_fn.py From professional-services with Apache License 2.0 | 4 votes |
def make_input_fn(input_dir,
                  batch_size,
                  training=True,
                  num_epochs=None,
                  random_seed=None,
                  prefetch_buffer_size=1):
  """Generates an input_fn to use as input to estimator or experiment.

  Combines positive review and negative review datasets into one single
  dataset. Applies shuffling and batching by sequence length.

  Args:
    input_dir: `str`, path to input data.
    batch_size: `int`, size of input batches.
    training: `bool`, whether the data is training data or validation data.
    num_epochs: `int`, number of epochs to repeat the dataset.
    random_seed: `int`, seed to use for randomization.
    prefetch_buffer_size: `int`, buffer size to use to prefetch data.

  Returns:
    Input function.
  """

  def _get_shape(features, labels):
    del labels  # Unused.
    return features[constants.SEQUENCE_LENGTH]

  def _input_fn():
    """Train/eval input function."""
    schema = utils.get_processed_data_schema()

    def _parse_input(record):
      record = tf.parse_single_example(serialized=record, features=schema)
      labels = record.pop(constants.LABELS)
      features = parse_raw_text(
          tf.reshape(record.pop(constants.REVIEW), [-1]))
      features[constants.TOKENS] = tf.sparse_reshape(
          features[constants.TOKENS], shape=[-1])
      return features, labels

    filenames = tf.train.match_filenames_once(input_dir)
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(
        _parse_input, num_parallel_calls=multiprocessing.cpu_count())
    dataset = dataset.shuffle(
        buffer_size=_SHUFFLE_BUFFER_SIZE, seed=random_seed)

    # Groups records by sequence length.
    boundaries = np.arange(
        _BUCKET_MIN_BOUNDARY, _BUCKET_MAX_BOUNDARY, _BUCKET_LENGTH_STEP)
    dataset = dataset.apply(group_by_sequence_length_sparse(
        _get_shape, boundaries, batch_size))

    # Repeats dataset.
    dataset = dataset.repeat(num_epochs if training else 1)
    dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
    return dataset

  return _input_fn
Example #22
Source File: GAT.py From OpenHINE with MIT License | 4 votes |
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes,
                 in_drop=0.0, coef_drop=0.0, residual=False):
    with tf.name_scope('sp_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)

        seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)

        # simplest self-attention possible
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)
        logits = tf.sparse_add(adj_mat * f_1,
                               adj_mat * tf.transpose(f_2, [0, 2, 1]))
        lrelu = tf.SparseTensor(indices=logits.indices,
                                values=tf.nn.leaky_relu(logits.values),
                                dense_shape=logits.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        if coef_drop != 0.0:
            coefs = tf.SparseTensor(indices=coefs.indices,
                                    values=tf.nn.dropout(coefs.values,
                                                         1.0 - coef_drop),
                                    dense_shape=coefs.dense_shape)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        # As tf.sparse_tensor_dense_matmul expects its arguments to have
        # rank-2, here we make an assumption that our input is of batch
        # size 1, and reshape appropriately.
        # The method will fail in all other cases!
        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])

        seq_fts = tf.squeeze(seq_fts)
        vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
        vals = tf.expand_dims(vals, axis=0)
        vals.set_shape([1, nb_nodes, out_sz])
        ret = tf.contrib.layers.bias_add(vals)

        # residual connection
        if residual:
            if seq.shape[-1] != ret.shape[-1]:
                ret = ret + conv1d(seq, ret.shape[-1], 1)  # activation
            else:
                # Note: the original assigned this sum to seq_fts, which
                # silently dropped the residual; assigning to ret applies it.
                ret = ret + seq

        return activation(ret)  # activation

# final_embed, att_val = layers.SimpleAttLayer(multi_embed, mp_att_size,
#                                              time_major=False,
#                                              return_alphas=True)
Example #23
Source File: layers.py From GAT with MIT License | 4 votes |
def sp_attn_head(seq, out_sz, adj_mat, activation, nb_nodes,
                 in_drop=0.0, coef_drop=0.0, residual=False):
    with tf.name_scope('sp_attn'):
        if in_drop != 0.0:
            seq = tf.nn.dropout(seq, 1.0 - in_drop)

        seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)

        # simplest self-attention possible
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)

        f_1 = tf.reshape(f_1, (nb_nodes, 1))
        f_2 = tf.reshape(f_2, (nb_nodes, 1))

        f_1 = adj_mat * f_1
        f_2 = adj_mat * tf.transpose(f_2, [1, 0])

        logits = tf.sparse_add(f_1, f_2)
        lrelu = tf.SparseTensor(indices=logits.indices,
                                values=tf.nn.leaky_relu(logits.values),
                                dense_shape=logits.dense_shape)
        coefs = tf.sparse_softmax(lrelu)

        if coef_drop != 0.0:
            coefs = tf.SparseTensor(indices=coefs.indices,
                                    values=tf.nn.dropout(coefs.values,
                                                         1.0 - coef_drop),
                                    dense_shape=coefs.dense_shape)
        if in_drop != 0.0:
            seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)

        # As tf.sparse_tensor_dense_matmul expects its arguments to have
        # rank-2, here we make an assumption that our input is of batch
        # size 1, and reshape appropriately.
        # The method will fail in all other cases!
        coefs = tf.sparse_reshape(coefs, [nb_nodes, nb_nodes])

        seq_fts = tf.squeeze(seq_fts)
        vals = tf.sparse_tensor_dense_matmul(coefs, seq_fts)
        vals = tf.expand_dims(vals, axis=0)
        vals.set_shape([1, nb_nodes, out_sz])
        ret = tf.contrib.layers.bias_add(vals)

        # residual connection
        if residual:
            if seq.shape[-1] != ret.shape[-1]:
                ret = ret + conv1d(seq, ret.shape[-1], 1)  # activation
            else:
                ret = ret + seq

        return activation(ret)  # activation
Example #24
Source File: cscGAN.py From scGAN with MIT License | 4 votes |
def make_input_fn(self, file_paths, epochs=None):
    """
    Function that loads the TFRecords files and creates the placeholders
    for the data inputs.

    Parameters
    ----------
    file_paths : list
        List of TFRecord files to read from.
    epochs : int
        Integer specifying the number of times to read through the dataset.
        If None, cycles through the dataset forever.
        NOTE - If specified, creates a variable that must be initialized,
        so call tf.local_variables_initializer() and run the op in a
        session. Default is None.

    Returns
    -------
    features : Tensor
        Tensor containing a batch of cells (vector of expression levels).
    cluster : Tensor
        Tensor containing (a batch of) the cluster indexes of the
        corresponding cells.
    """
    feature_map = {'scg': tf.SparseFeature(index_key='indices',
                                           value_key='values',
                                           dtype=tf.float32,
                                           size=self.genes_no),
                   'cluster_int': tf.FixedLenFeature(1, tf.int64)}

    options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)

    batched_features = tf.contrib.learn.read_batch_features(
        file_pattern=file_paths,
        batch_size=self.batch_size,
        features=feature_map,
        reader=lambda: tf.TFRecordReader(options=options),
        num_epochs=epochs)

    sgc = batched_features['scg']
    sparse = tf.sparse_reshape(sgc, (self.batch_size, self.genes_no))
    dense = tf.sparse_tensor_to_dense(sparse)
    cluster = tf.squeeze(tf.to_int32(batched_features['cluster_int']))
    features = tf.reshape(dense, (self.batch_size, self.genes_no))

    return features, cluster