Python tensorflow.Variable() Examples
The following are 7 code examples of tensorflow.Variable(), drawn from open-source projects.
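As a quick orientation before the examples, here is a minimal sketch of tf.Variable itself, assuming the TF 1.x graph-and-session style that all of the snippets below use (the variable name and shape are illustrative only):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# A variable holds mutable state (here, an illustrative embedding table)
# that persists across session.run() calls and can be trained or reassigned.
ent_emb = tf.Variable(np.zeros((100, 16)), dtype=tf.float32, name="ent_emb")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # variables must be initialized first
    print(sess.run(tf.shape(ent_emb)))           # -> [100 16]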
Example #1
Source File: EmbeddingModel.py From AmpliGraph with Apache License 2.0
def _end_training(self):
    """Performs clean-up tasks after training."""
    # Reset this variable as it is reused during the evaluation phase
    if self.is_filtered and self.eval_dataset_handle is not None:
        # Clean up the evaluation data (deletion of tables)
        self.eval_dataset_handle.cleanup()
        self.eval_dataset_handle = None

    if self.train_dataset_handle is not None:
        self.train_dataset_handle.cleanup()
        self.train_dataset_handle = None

    self.is_filtered = False
    self.eval_config = {}

    # Close the tf session
    if self.sess_train is not None:
        self.sess_train.close()

    # Set is_fitted to True to indicate that model fitting is complete
    self.is_fitted = True
Example #2
Source File: base_ranking_model.py From ULTRA with Apache License 2.0
def get_variable(self, name, shape, noisy_params=None, noise_rate=0.05, **kwargs):
    """Get a tensorflow variable for the model. Add noise if required.

    Args:
        name: The name of the variable.
        shape: The shape of the variable.
        noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
        noise_rate: (float) A value specifying how much noise to add.

    Returns:
        A tf.Tensor.
    """
    var = tf.get_variable(name, shape, **kwargs)
    self.model_parameters[var.name] = var
    if noisy_params is not None and var.name in noisy_params:
        var = var + noisy_params[var.name] * noise_rate
    return var
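A hypothetical caller-side sketch for get_variable() above (the model instance, shapes, and key names are assumptions, not taken from ULTRA). One detail worth noting: the noisy_params lookup uses var.name, which carries a ":0" suffix, not the bare name argument:

# Keys must match tf.Variable.name (note the ":0" suffix), since
# get_variable() checks "var.name in noisy_params".
noisy_params = {
    "linear_W_0:0": tf.random_normal([136, 1]),  # assumed shape
    "linear_b_0:0": tf.random_normal([1]),
}
# "model" and "input_list" are assumed to exist; each matching variable
# is then perturbed by noise * noise_rate.
scores = model.build(input_list, noisy_params=noisy_params, noise_rate=0.05)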
Example #3
Source File: Linear.py From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None, noise_rate=0.05, is_training=False, **kwargs):
    """Create the Linear model.

    Args:
        input_list: (list<tf.tensor>) A list of tensors containing the features for a list of documents.
        noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
        noise_rate: (float) A value specifying how much noise to add.
        is_training: (bool) A flag indicating whether the model is running in training mode.

    Returns:
        A list of tf.Tensor containing the ranking scores for each instance in input_list.
    """
    with tf.variable_scope(tf.get_variable_scope(),
                           initializer=self.initializer,
                           reuse=tf.AUTO_REUSE):
        input_data = tf.concat(input_list, axis=0)
        output_data = input_data
        output_sizes = [1]

        if self.layer_norm is None and self.hparams.norm in BaseRankingModel.NORM_FUNC_DIC:
            self.layer_norm = []
            for j in range(len(output_sizes)):
                self.layer_norm.append(BaseRankingModel.NORM_FUNC_DIC[self.hparams.norm](
                    name="layer_norm_%d" % j))

        current_size = output_data.get_shape()[-1].value
        for j in range(len(output_sizes)):
            if self.layer_norm is not None:
                output_data = self.layer_norm[j](output_data, training=is_training)
            expand_W = self.get_variable("linear_W_%d" % j,
                                         [current_size, output_sizes[j]],
                                         noisy_params=noisy_params, noise_rate=noise_rate)
            expand_b = self.get_variable("linear_b_%d" % j,
                                         [output_sizes[j]],
                                         noisy_params=noisy_params, noise_rate=noise_rate)
            output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
        return tf.split(output_data, len(input_list), axis=0)
Example #4
Source File: base_ranking_model.py From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None, noise_rate=0.05, is_training=False, **kwargs):
    """Create the model.

    Args:
        input_list: (list<tf.tensor>) A list of tensors containing the features for a list of documents.
        noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
        noise_rate: (float) A value specifying how much noise to add.
        is_training: (bool) A flag indicating whether the model is running in training mode.

    Returns:
        A list of tf.Tensor containing the ranking scores for each instance in input_list.
    """
    pass
Example #5
Source File: SetRank.py From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None, noise_rate=0.05, is_training=False, **kwargs):
    """Create the SetRank model (no support for noisy parameters).

    Args:
        input_list: (list<tf.tensor>) A list of tensors containing the features for a list of documents.
        noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
        noise_rate: (float) A value specifying how much noise to add.
        is_training: (bool) A flag indicating whether the model is running in training mode.

    Returns:
        A list of tf.Tensor containing the ranking scores for each instance in input_list.
    """
    with tf.variable_scope(tf.get_variable_scope() or "transformer",
                           reuse=tf.AUTO_REUSE,
                           initializer=self.initializer):
        sco_cur = tf.get_variable_scope()
        print(sco_cur.name, "sco_cur.name")
        mask = None
        batch_size = tf.shape(input_list[0])[0]
        feature_size = tf.shape(input_list[0])[1]
        list_size = len(input_list)
        ind = list(range(0, list_size))
        random.shuffle(ind)
        # input_list = [input_list[i] for i in ind]
        x = [tf.expand_dims(e, 1) for e in input_list]
        x = tf.concat(axis=1, values=x)  # [batch, len_seq, feature_size]
        x = self.Encoder_layer(x, is_training, mask)  # [batch, len_seq, 1]
        output = []
        for i in range(list_size):
            output.append(x[:, i, :])
        # reind_output = [None] * list_size
        # for i in range(list_size):
        #     reind_output[ind[i]] = output[i]
        # output = reind_output
        return output  # [len_seq, batch, 1]
Example #6
Source File: EmbeddingModel.py From AmpliGraph with Apache License 2.0
def _load_model_from_trained_params(self):
    """Load the model from trained params.

    While restoring, make sure that the order of the loaded parameters matches the saved order.
    It is the duty of the embedding model to load the variables correctly.
    This method must be overridden if the model has any other parameters
    (apart from entity-relation embeddings).
    This function also sets the evaluation mode to do lazy loading of variables
    based on the number of distinct entities present in the graph.
    """
    # Generate the batch size based on entity length and batch_count
    self.batch_size = int(np.ceil(len(self.ent_to_idx) / self.batches_count))

    if len(self.ent_to_idx) > ENTITY_THRESHOLD:
        self.dealing_with_large_graphs = True
        logger.warning('Your graph has a large number of distinct entities. '
                       'Found {} distinct entities'.format(len(self.ent_to_idx)))
        logger.warning('Changing the variable loading strategy to use lazy loading of variables...')
        logger.warning('Evaluation would take longer than usual.')

    if not self.dealing_with_large_graphs:
        # (We use tf.Variable for the future - to load and continue training)
        self.ent_emb = tf.Variable(self.trained_model_params[0], dtype=tf.float32)
    else:
        # Embeddings of all the corruption entities will not fit on the GPU.
        # During training we loaded batch_size * 2 embeddings on the GPU, as only
        # 2 * batch_size unique entities can be present in one batch.
        # During corruption generation in eval mode, one side (s/o) is fixed and only
        # the other side varies, so we use a batch size of 2 * training_batch_size
        # for corruption generation, i.e. that many corruption embeddings are loaded
        # per batch on the GPU. In other words, those corruptions are processed as a batch.
        self.corr_batch_size = self.batch_size * 2

        # Load the entity embeddings on the CPU
        self.ent_emb_cpu = self.trained_model_params[0]

        # Create an empty variable on the GPU, initialized with zeros because the
        # actual embeddings will be loaded on the fly.
        # (We use tf.Variable for the future - to load and continue training)
        self.ent_emb = tf.Variable(np.zeros((self.corr_batch_size, self.internal_k)),
                                   dtype=tf.float32)

    # (We use tf.Variable for the future - to load and continue training)
    self.rel_emb = tf.Variable(self.trained_model_params[1], dtype=tf.float32)
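The comments above describe loading entity embeddings on the fly for large graphs; a purely illustrative sketch of that per-batch refresh (not AmpliGraph's actual code; model, sess, and batch_entity_ids are assumed to exist) might look like:

# Gather the embeddings needed for the current batch from the CPU-side
# NumPy array, then push them into the fixed-size GPU variable.
emb_ph = tf.placeholder(tf.float32, shape=(model.corr_batch_size, model.internal_k))
load_op = model.ent_emb.assign(emb_ph)

batch_emb = model.ent_emb_cpu[batch_entity_ids]   # CPU gather for this batch
sess.run(load_op, feed_dict={emb_ph: batch_emb})  # refresh the GPU variable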
Example #7
Source File: DNN.py From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None, noise_rate=0.05, is_training=False, **kwargs):
    """Create the DNN model.

    Args:
        input_list: (list<tf.tensor>) A list of tensors containing the features for a list of documents.
        noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
        noise_rate: (float) A value specifying how much noise to add.
        is_training: (bool) A flag indicating whether the model is running in training mode.

    Returns:
        A list of tf.Tensor containing the ranking scores for each instance in input_list.
    """
    with tf.variable_scope(tf.get_variable_scope(),
                           initializer=self.initializer,
                           reuse=tf.AUTO_REUSE):
        input_data = tf.concat(input_list, axis=0)
        output_data = input_data
        output_sizes = self.hparams.hidden_layer_sizes + [1]

        if self.layer_norm is None and self.hparams.norm in BaseRankingModel.NORM_FUNC_DIC:
            self.layer_norm = []
            for j in range(len(output_sizes)):
                self.layer_norm.append(BaseRankingModel.NORM_FUNC_DIC[self.hparams.norm](
                    name="layer_norm_%d" % j))

        current_size = output_data.get_shape()[-1].value
        for j in range(len(output_sizes)):
            if self.layer_norm is not None:
                output_data = self.layer_norm[j](output_data, training=is_training)
            expand_W = self.get_variable("dnn_W_%d" % j,
                                         [current_size, output_sizes[j]],
                                         noisy_params=noisy_params, noise_rate=noise_rate)
            expand_b = self.get_variable("dnn_b_%d" % j,
                                         [output_sizes[j]],
                                         noisy_params=noisy_params, noise_rate=noise_rate)
            output_data = tf.nn.bias_add(tf.matmul(output_data, expand_W), expand_b)
            # Add activation if it is a hidden layer
            if j != len(output_sizes) - 1:
                output_data = self.act_func(output_data)
            current_size = output_sizes[j]
        return tf.split(output_data, len(input_list), axis=0)
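Finally, a hypothetical end-to-end use of the build() contract shared by the ULTRA models above (sizes are assumed; "model" stands for an already-constructed DNN instance):

# Rank five documents, each represented by a 136-dimensional feature vector.
input_list = [tf.placeholder(tf.float32, [None, 136]) for _ in range(5)]
scores = model.build(input_list, is_training=True)
# scores is a list of five tensors, one per document slot, each of shape [batch, 1].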