Python tensorflow.sparse_placeholder() Examples
The following are code examples of tensorflow.sparse_placeholder(), drawn from open-source projects; the original project and source file are noted above each example. You may also want to check out all available functions/classes of the module tensorflow. tf.sparse_placeholder() belongs to the TF 1.x graph-mode API: it declares a tf.SparseTensor input whose value must be supplied through feed_dict at session run time (in TensorFlow 2.x it is only reachable via tf.compat.v1.sparse_placeholder).
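Before the project examples, here is a minimal sketch of the basic pattern, assuming the TF 1.x API: a sparse placeholder can be fed either a tf.SparseTensorValue or a plain (indices, values, dense_shape) tuple.

import numpy as np
import tensorflow as tf

x = tf.sparse_placeholder(tf.float32)  # shape may be left unspecified
y = tf.sparse_reduce_sum(x)            # any op that consumes a SparseTensor

with tf.Session() as sess:
    indices = np.array([[3, 2], [4, 5]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9], dtype=np.int64)
    # Feed a SparseTensorValue...
    print(sess.run(y, feed_dict={x: tf.SparseTensorValue(indices, values, shape)}))  # 3.0
    # ...or an (indices, values, dense_shape) tuple.
    print(sess.run(y, feed_dict={x: (indices, values, shape)}))  # 3.0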
Example #1
Source File: base_models.py From gcnn-survey-paper with Apache License 2.0

def _create_placeholders(self):
    """Create placeholders."""
    with tf.name_scope('input'):
        self.placeholders = {
            'adj_true': tf.placeholder(tf.float32, shape=[None, None]),
            # to compute loss
            'adj_train': tf.placeholder(tf.float32, shape=[None, None]),
            # for inference step
            'adj_train_norm': tf.sparse_placeholder(tf.float32),
            # normalized
            'edge_mask': tf.sparse_placeholder(tf.float32),
            'node_labels': tf.placeholder(tf.float32, shape=[None, self.n_hidden_node[-1]]),
            'node_mask': tf.placeholder(tf.float32, shape=[None,]),
            'is_training': tf.placeholder(tf.bool),
        }
        if self.sparse_features:
            self.placeholders['features'] = tf.sparse_placeholder(tf.float32)
        else:
            self.placeholders['features'] = tf.placeholder(
                tf.float32, shape=[None, self.input_dim])
Example #2
Source File: neural_dater.py From NeuralDater with Apache License 2.0

def add_placeholders(self):
    """Defines the placeholders required for the model."""
    self.input_x = tf.placeholder(tf.int32, shape=[None, None], name='input_data')    # Words in a document (batch_size x max_words)
    self.input_y = tf.placeholder(tf.int32, shape=[None, None], name='input_labels')  # Actual creation year of each document
    self.x_len = tf.placeholder(tf.int32, shape=[None], name='input_len')             # Number of words in each document in a batch
    self.et_idx = tf.placeholder(tf.int32, shape=[None, None], name='et_idx')         # Indices of tokens which are events/time expressions
    self.et_mask = tf.placeholder(tf.float32, shape=[None, None], name='et_mask')

    # List of batch_size dictionaries, each mapping a label to a sparse_placeholder [Temporal graph]
    self.de_adj_mat = [{lbl: tf.sparse_placeholder(tf.float32, shape=[None, None])
                        for lbl in range(self.num_deLabel)} for _ in range(self.p.batch_size)]
    # List of batch_size dictionaries, each mapping a label to a sparse_placeholder [Syntactic graph]
    self.et_adj_mat = [{lbl: tf.sparse_placeholder(tf.float32, shape=[None, None])
                        for lbl in range(self.num_etLabel)} for _ in range(self.p.batch_size)]

    self.seq_len = tf.placeholder(tf.int32, shape=(), name='seq_len')  # Maximum number of words in documents of a batch
    self.max_et = tf.placeholder(tf.int32, shape=(), name='max_et')    # Maximum number of events/time expressions in documents of a batch
    self.dropout = tf.placeholder_with_default(self.p.dropout, shape=(), name='dropout')              # Dropout used in the GCN layer
    self.rec_dropout = tf.placeholder_with_default(self.p.rec_dropout, shape=(), name='rec_dropout')  # Recurrent dropout used in the Bi-LSTM
Example #3
Source File: example_static.py From rgat with Apache License 2.0

def get_architecture():
    inputs_ph = tf.placeholder(
        dtype=tf.float32, shape=[None, FLAGS.features_dim], name="features_")
    support_ph = tf.sparse_placeholder(
        dtype=tf.float32, shape=[None, None], name="support_")

    tf.logging.info("Reordering indices of support - this is extremely "
                    "important as sparse operations assume sparse indices have "
                    "been ordered.")
    support_reorder = tf.sparse_reorder(support_ph)

    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)
    outputs = rgat_layer(inputs=inputs_ph, support=support_reorder)

    return inputs_ph, support_ph, outputs
Example #4
Source File: speech_input.py From speechT with Apache License 2.0

def __init__(self, input_size, batch_size, data_generator_creator, max_steps=None):
    super().__init__(input_size)
    self.batch_size = batch_size
    self.data_generator_creator = data_generator_creator
    self.steps_left = max_steps

    with tf.device("/cpu:0"):
        # Define input and label placeholders
        # inputs is of dimension [batch_size, max_time, input_size]
        self.inputs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='inputs')
        self.sequence_lengths = tf.placeholder(tf.int32, [batch_size], name='sequence_lengths')
        self.labels = tf.sparse_placeholder(tf.int32, name='labels')

        # Queue for inputs and labels
        self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                  capacity=100)

        # Queues do not support sparse tensors yet, so we need to serialize...
        serialized_labels = tf.serialize_many_sparse(self.labels)

        self.enqueue_op = self.queue.enqueue([self.inputs,
                                              self.sequence_lengths,
                                              serialized_labels])
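The dequeue side of this pattern is not shown in the snippet. A sketch of what it would look like under the same TF 1.x API, reversing the serialization with tf.deserialize_many_sparse:

# Hypothetical dequeue side for the queue above (a sketch, TF 1.x API).
inputs, sequence_lengths, serialized = queue.dequeue()
# Rebuild the SparseTensor that was serialized before enqueueing.
labels = tf.deserialize_many_sparse(serialized, dtype=tf.int32)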
Example #5
Source File: base_models.py From gcnn-survey-paper with Apache License 2.0

def _create_placeholders(self):
    """Create placeholders."""
    with tf.name_scope('input'):
        self.placeholders = {
            'adj_train': tf.sparse_placeholder(tf.float32),  # normalized
            'node_labels': tf.placeholder(tf.float32, shape=[None, self.n_hidden[-1]]),
            'node_mask': tf.placeholder(tf.float32, shape=[None,]),
            'is_training': tf.placeholder(tf.bool),
        }
        if self.sparse_features:
            self.placeholders['features'] = tf.sparse_placeholder(tf.float32)
        else:
            self.placeholders['features'] = tf.placeholder(
                tf.float32, shape=[None, self.input_dim])
Example #6
Source File: base_models.py From gcnn-survey-paper with Apache License 2.0

def _create_placeholders(self):
    """Create placeholders."""
    with tf.name_scope('input'):
        self.placeholders = {
            # to compute metrics
            'adj_true': tf.placeholder(tf.float32, shape=[None, None]),
            # to compute loss
            'adj_train': tf.placeholder(tf.float32, shape=[None, None]),
            # for inference step
            'adj_train_norm': tf.sparse_placeholder(tf.float32),
            # normalized
            'edge_mask': tf.sparse_placeholder(tf.float32),
            'is_training': tf.placeholder(tf.bool),
        }
        if self.sparse_features:
            self.placeholders['features'] = tf.sparse_placeholder(tf.float32)
        else:
            self.placeholders['features'] = tf.placeholder(
                tf.float32, shape=[None, self.input_dim])
Example #7
Source File: utils.py From Automatic_Speech_Recognition with MIT License

def get_edit_distance(hyp_arr, truth_arr, normalize, level):
    '''Calculate edit distance.
    Works for both character-level and phoneme-level labels.
    '''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.reduce_sum(tf.edit_distance(hyp, truth, normalize=normalize))

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr, level)
        hypTest = list_to_sparse_tensor(hyp_arr, level)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)

    return dist
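list_to_sparse_tensor is the project's own helper and is not reproduced on this page; any function that packs a batch of label sequences into the (indices, values, dense_shape) triple accepted by a sparse placeholder will do. A hypothetical stand-in:

import numpy as np

def sequences_to_sparse(sequences):
    """Hypothetical stand-in for list_to_sparse_tensor (a sketch): pack a
    list of integer label sequences into the (indices, values, dense_shape)
    triple that a tf.sparse_placeholder accepts."""
    indices, values = [], []
    for n, seq in enumerate(sequences):
        indices.extend((n, t) for t in range(len(seq)))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=np.int32)
    dense_shape = np.asarray(
        [len(sequences), max(len(s) for s in sequences)], dtype=np.int64)
    return indices, values, dense_shape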
Example #8
Source File: tensorflow_backend.py From DeepLearning_Wavelet-LSTM with MIT License

def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiates a placeholder tensor and returns it.

    # Arguments
        shape: Shape of the placeholder
            (integer tuple, may include `None` entries).
        ndim: Number of axes of the tensor.
            At least one of {`shape`, `ndim`} must be specified.
            If both are specified, `shape` is used.
        dtype: Placeholder type.
        sparse: Boolean, whether the placeholder should have a sparse type.
        name: Optional name string for the placeholder.

    # Returns
        Tensor instance (with Keras metadata included).

    # Examples
    ```python
        >>> from keras import backend as K
        >>> input_ph = K.placeholder(shape=(2, 4, 5))
        >>> input_ph._keras_shape
        (2, 4, 5)
        >>> input_ph
        <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
    ```
    """
    if dtype is None:
        dtype = floatx()
    if not shape:
        if ndim:
            shape = tuple([None for _ in range(ndim)])
    if sparse:
        x = tf.sparse_placeholder(dtype, shape=shape, name=name)
    else:
        x = tf.placeholder(dtype, shape=shape, name=name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
Example #9
Source File: gcnAPI.py From GPF with MIT License

def build_placeholders(self):
    num_supports = 1
    self.placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(self.features[2], dtype=tf.int64)),
        'labels': tf.placeholder(tf.float32, shape=(None, self.labels.shape[1])),
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        # helper variable for sparse dropout
        'num_features_nonzero': tf.placeholder(tf.int32)
    }
Example #10
Source File: orcmodel.py From Unofficial-Zhihu-API with MIT License

def __init__(self, mode):
    self.mode = mode
    # Image input
    self.inputs = tf.placeholder(tf.float32,
                                 [None, FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel])
    # ctc_loss requires the labels as a sparse tensor
    self.labels = tf.sparse_placeholder(tf.int32)
    # 1-D array of size [batch_size]
    self.seq_len = tf.placeholder(tf.int32, [None])
    # l2
    self._extra_train_ops = []  # ops that update the moving mean and moving variance
Example #11
Source File: tf_train_ctc.py From RNN-Tutorial with Apache License 2.0

def setup_network_and_graph(self):
    # e.g.: log filter bank or MFCC features
    # shape = [batch_size, max_stepsize, n_input + (2 * n_input * n_context)]
    # the batch_size and max_stepsize can vary along each step
    self.input_tensor = tf.placeholder(
        tf.float32,
        [None, None, self.n_input + (2 * self.n_input * self.n_context)],
        name='input')

    # Use sparse_placeholder; will generate a SparseTensor, required by the ctc_loss op
    self.targets = tf.sparse_placeholder(tf.int32, name='targets')

    # 1d array of size [batch_size]
    self.seq_length = tf.placeholder(tf.int32, [None], name='seq_length')
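These three placeholders are exactly the inputs TF 1.x's CTC loss consumes. A hypothetical continuation (a sketch; `logits` would come from the network, which is not part of the snippet above):

# A sketch of how these placeholders typically feed the CTC loss (TF 1.x).
# `logits` is assumed to come from the acoustic model, shaped
# [max_time, batch_size, num_classes] (time-major, the default).
loss = tf.nn.ctc_loss(labels=self.targets,
                      inputs=logits,
                      sequence_length=self.seq_length)
avg_loss = tf.reduce_mean(loss)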
Example #12
Source File: data_pipeline.py From pregel with MIT License

def _set_placeholder_dict(self):
    '''Logic borrowed from
    https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/fully_connected_feed.py'''
    labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.label_size), name=LABELS)
    features_placeholder = tf.placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
    if (self.model_params.sparse_features):
        features_placeholder = tf.sparse_placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
    mask_placeholder = tf.placeholder(tf.float32, name=MASK)
    # For disabling dropout during testing - based on
    # https://stackoverflow.com/questions/44971349/how-to-turn-off-dropout-for-testing-in-tensorflow
    dropout_placeholder = tf.placeholder_with_default(0.0, shape=(), name=DROPOUT)
    support_placeholder = []
    for i in range(self.support_size):
        support_placeholder.append(tf.sparse_placeholder(tf.float32, name=SUPPORTS + str(i)))
    self.placeholder_dict = {
        FEATURES: features_placeholder,
        LABELS: labels_placeholder,
        SUPPORTS: support_placeholder,
        MASK: mask_placeholder,
        DROPOUT: dropout_placeholder
    }
Example #13
Source File: data_pipeline_ae.py From pregel with MIT License

def _set_placeholder_dict(self):
    '''Logic borrowed from
    https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/fully_connected_feed.py'''
    labels_placeholder = tf.sparse_placeholder(tf.float32, name=LABELS)
    # Since this is the auto-encoder model, we are basically passing along the original adjacency matrix
    features_placeholder = tf.placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
    if (self.model_params.sparse_features):
        features_placeholder = tf.sparse_placeholder(tf.float32, shape=(None, self.feature_size), name=FEATURES)
    # For disabling dropout during testing - based on
    # https://stackoverflow.com/questions/44971349/how-to-turn-off-dropout-for-testing-in-tensorflow
    dropout_placeholder = tf.placeholder_with_default(0.0, shape=(), name=DROPOUT)
    mask_placeholder = tf.sparse_placeholder(tf.float32, name=MASK)
    mode_placeholder = tf.placeholder(tf.string, name=MODE)
    normalisation_constant_placeholder = tf.placeholder_with_default(0.5, shape=(), name=NORMALISATION_CONSTANT)
    support_placeholder = []
    for i in range(self.support_size):
        support_placeholder.append(tf.sparse_placeholder(tf.float32, name=SUPPORTS + str(i)))
    self.placeholder_dict = {
        LABELS: labels_placeholder,
        FEATURES: features_placeholder,
        SUPPORTS: support_placeholder,
        MASK: mask_placeholder,
        DROPOUT: dropout_placeholder,
        MODE: mode_placeholder,
        NORMALISATION_CONSTANT: normalisation_constant_placeholder
    }
Example #14
Source File: param.py From GGP with Apache License 2.0

def make_tf_array(self):
    self._tf_array = tf.sparse_placeholder(dtype=self._get_type(self._coo_array[1]),
                                           shape=[None] * len(self._coo_array[2]),
                                           name=self.name)
Example #15
Source File: ed.py From Automatic_Speech_Recognition with MIT License

def get_edit_distance(hyp_arr, truth_arr, mode='train'):
    '''Calculate edit distance.'''
    graph = tf.Graph()
    with graph.as_default():
        truth = tf.sparse_placeholder(tf.int32)
        hyp = tf.sparse_placeholder(tf.int32)
        editDist = tf.edit_distance(hyp, truth, normalize=True)

    with tf.Session(graph=graph) as session:
        truthTest = list_to_sparse_tensor(truth_arr, mode)
        hypTest = list_to_sparse_tensor(hyp_arr, mode)
        feedDict = {truth: truthTest, hyp: hypTest}
        dist = session.run(editDist, feed_dict=feedDict)

    return dist
Example #16
Source File: models.py From Recommender-Systems-Samples with MIT License

def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None,
             opt_algo='gd', learning_rate=1e-2, l2_w=0, l2_v=0, random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        X_square = tf.SparseTensor(self.X.indices,
                                   tf.square(self.X.values),
                                   tf.to_int64(tf.shape(self.X)))
        xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, self.vars['v']))
        p = 0.5 * tf.reshape(
            tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(self.vars['v'])), 1),
            [-1, output_dim])
        xw = tf.sparse_tensor_dense_matmul(self.X, self.vars['w'])
        logits = tf.reshape(xw + self.vars['b'] + p, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(xw) + \
            l2_v * tf.nn.l2_loss(xv)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
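This example implements a factorization machine over a sparse input. The `p` term computes the standard FM identity, which reduces the quadratic number of pairwise interactions to two sparse matrix products:

\sum_{i<j} \langle v_i, v_j \rangle x_i x_j = \frac{1}{2} \sum_{f=1}^{k} \Big[ \Big( \sum_i v_{i,f} x_i \Big)^2 - \sum_i v_{i,f}^2 x_i^2 \Big]

In the code, `xv` holds the squared first sum, tf.sparse_tensor_dense_matmul(X_square, tf.square(...)) the second, and `p` is half their difference reduced over the `factor_order` dimension k. The same identity appears again in Example #19 below.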
Example #17
Source File: asne.py From ASNE with GNU General Public License v3.0

def _setup_variables(self):
    """Creating TensorFlow variables and placeholders."""
    self.node_embedding = tf.random_uniform([self.node_count, self.args.node_embedding_dimensions], -1.0, 1.0)
    self.node_embedding = tf.Variable(self.node_embedding, dtype=tf.float32)

    self.feature_embedding = tf.random_uniform([self.feature_count, self.args.feature_embedding_dimensions], -1.0, 1.0)
    self.feature_embedding = tf.Variable(self.feature_embedding, dtype=tf.float32)

    self.combined_dimensions = self.args.node_embedding_dimensions + self.args.feature_embedding_dimensions

    self.noise_embedding = tf.Variable(tf.truncated_normal([self.node_count, self.combined_dimensions],
                                                           stddev=1.0 / math.sqrt(self.combined_dimensions)),
                                       dtype=tf.float32)
    self.noise_bias = tf.Variable(tf.zeros([self.node_count]), dtype=tf.float32)

    self.left_nodes = tf.placeholder(tf.int32, shape=[None])
    self.node_features = tf.sparse_placeholder(tf.float32, shape=[None, self.feature_count])
    self.right_nodes = tf.placeholder(tf.int32, shape=[None, 1])
Example #18
Source File: models.py From ML_CIA with MIT License

def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd',
             learning_rate=1e-2, l2_weight=0, random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        w = self.vars['w']
        b = self.vars['b']
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
            l2_weight * tf.nn.l2_loss(xw)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Example #19
Source File: models.py From ML_CIA with MIT License

def __init__(self, input_dim=None, output_dim=1, factor_order=10, init_path=None,
             opt_algo='gd', learning_rate=1e-2, l2_w=0, l2_v=0, random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('v', [input_dim, factor_order], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)

        w = self.vars['w']
        v = self.vars['v']
        b = self.vars['b']

        X_square = tf.SparseTensor(self.X.indices,
                                   tf.square(self.X.values),
                                   tf.to_int64(tf.shape(self.X)))
        xv = tf.square(tf.sparse_tensor_dense_matmul(self.X, v))
        p = 0.5 * tf.reshape(
            tf.reduce_sum(xv - tf.sparse_tensor_dense_matmul(X_square, tf.square(v)), 1),
            [-1, output_dim])
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b + p, [-1])
        self.y_prob = tf.sigmoid(logits)

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)) + \
            l2_w * tf.nn.l2_loss(xw) + \
            l2_v * tf.nn.l2_loss(xv)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Example #20
Source File: neurosat.py From neurosat with Apache License 2.0

def declare_placeholders(self):
    self.n_vars = tf.placeholder(tf.int32, shape=[], name='n_vars')
    self.n_lits = tf.placeholder(tf.int32, shape=[], name='n_lits')
    self.n_clauses = tf.placeholder(tf.int32, shape=[], name='n_clauses')
    self.L_unpack = tf.sparse_placeholder(tf.float32, shape=[None, None], name='L_unpack')
    self.is_sat = tf.placeholder(tf.bool, shape=[None], name='is_sat')

    # useful helpers
    self.n_batches = tf.shape(self.is_sat)[0]
    self.n_vars_per_batch = tf.div(self.n_vars, self.n_batches)
Example #21
Source File: model.py From graph2gauss with MIT License

def __setup_inductive(self, A, X, p_nodes):
    N = A.shape[0]
    nodes_rnd = np.random.permutation(N)
    n_hide = int(N * p_nodes)
    nodes_hide = nodes_rnd[:n_hide]

    A_hidden = A.copy().tolil()
    A_hidden[nodes_hide] = 0
    A_hidden[:, nodes_hide] = 0

    # additionally add any dangling nodes to the hidden ones since we can't learn from them
    nodes_dangling = np.where(A_hidden.sum(0).A1 + A_hidden.sum(1).A1 == 0)[0]
    if len(nodes_dangling) > 0:
        nodes_hide = np.concatenate((nodes_hide, nodes_dangling))
    nodes_keep = np.setdiff1d(np.arange(N), nodes_hide)

    self.X = tf.sparse_placeholder(tf.float32)
    self.feed_dict = {self.X: sparse_feeder(X[nodes_keep])}

    self.ind_pairs = batch_pairs_sample(A, nodes_hide)
    self.ind_ground_truth = A[self.ind_pairs[:, 0], self.ind_pairs[:, 1]].A1
    self.ind_feed_dict = {self.X: sparse_feeder(X)}

    A = A[nodes_keep][:, nodes_keep]

    return A
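sparse_feeder is another project helper not reproduced on this page. A plausible equivalent (a sketch, assuming scipy is available) that converts a scipy sparse matrix into the value a tf.sparse_placeholder accepts:

import numpy as np
import scipy.sparse as sp
import tensorflow as tf

def sparse_feeder(M):
    """Plausible equivalent of the project's helper (a sketch):
    convert a scipy sparse matrix to a tf.SparseTensorValue."""
    M = sp.coo_matrix(M)
    indices = np.vstack((M.row, M.col)).T
    return tf.SparseTensorValue(indices, M.data, M.shape)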