Python theano.map() Examples
The following are 15 code examples of theano.map(), drawn from open-source projects. Each example lists its original source file, project, license, and vote count. You may also want to check out the other available functions and classes of the theano module.
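Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic theano.map() call: it applies a function to every element of a sequence and returns the stacked outputs together with an updates dictionary.

import numpy as np
import theano
import theano.tensor as T

x = T.vector("x")
# theano.map returns (outputs, updates); outputs are the per-element results, stacked.
squares, updates = theano.map(fn=lambda v: v ** 2, sequences=[x])
f = theano.function([x], squares, updates=updates)

print(f(np.arange(5, dtype=theano.config.floatX)))  # [ 0.  1.  4.  9. 16.]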
Example #1
Source File: theano_backend.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
        that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        return map(theano.gradient.disconnected_grad, variables)
    else:
        return theano.gradient.disconnected_grad(variables)

# CONTROL FLOW
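In this snippet, theano.gradient.disconnected_grad is what actually blocks the gradient; the wrapper only dispatches on whether a single tensor or a list was passed. A minimal sketch of the effect (illustrative only, not part of the Keras backend):

import theano
import theano.tensor as T

x = T.scalar("x")
# The squared term is wrapped, so it contributes nothing to the gradient of y.
y = theano.gradient.disconnected_grad(x ** 2) + x
grad_fn = theano.function([x], T.grad(y, x))
print(grad_fn(3.0))  # 1.0 -- only the unwrapped `+ x` path is differentiated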
Example #2
Source File: theano_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
        that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        return map(theano.gradient.disconnected_grad, variables)
    else:
        return theano.gradient.disconnected_grad(variables)

# CONTROL FLOW
Example #3
Source File: TD_RvNN.py From Rumor_RvNN with MIT License | 6 votes |
def gradient_descent(self, loss):
    """Momentum GD with gradient clipping."""
    grad = T.grad(loss, self.params)
    self.momentum_velocity_ = [0.] * len(grad)
    grad_norm = T.sqrt(sum(map(lambda x: T.sqr(x).sum(), grad)))
    updates = OrderedDict()
    not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
    scaling_den = T.maximum(5.0, grad_norm)
    for n, (param, grad) in enumerate(zip(self.params, grad)):
        grad = T.switch(not_finite, 0.1 * param, grad * (5.0 / scaling_den))
        velocity = self.momentum_velocity_[n]
        update_step = self.momentum * velocity - self.learning_rate * grad
        self.momentum_velocity_[n] = update_step
        updates[param] = param + update_step
    return updates
Example #4
Source File: BU_RvNN.py From Rumor_RvNN with MIT License | 6 votes |
def gradient_descent(self, loss):
    """Momentum GD with gradient clipping."""
    grad = T.grad(loss, self.params)
    self.momentum_velocity_ = [0.] * len(grad)
    grad_norm = T.sqrt(sum(map(lambda x: T.sqr(x).sum(), grad)))
    updates = OrderedDict()
    not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
    scaling_den = T.maximum(5.0, grad_norm)
    for n, (param, grad) in enumerate(zip(self.params, grad)):
        grad = T.switch(not_finite, 0.1 * param, grad * (5.0 / scaling_den))
        velocity = self.momentum_velocity_[n]
        update_step = self.momentum * velocity - self.learning_rate * grad
        self.momentum_velocity_[n] = update_step
        updates[param] = param + update_step
    return updates
Example #5
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
        that has constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        return map(theano.gradient.disconnected_grad, variables)
    else:
        return theano.gradient.disconnected_grad(variables)

# CONTROL FLOW
Example #6
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)),
                           sequences=[predictions_top_k, targets])
    return result

# CONVOLUTIONS
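For context, a self-contained sketch of the same membership test outside the Keras backend (variable and function names are illustrative, not from Att-ChemdNER): theano.map iterates over the rows of the top-k index matrix and the targets in lockstep, producing one 0/1 flag per row.

import numpy as np
import theano
import theano.tensor as T

predictions = T.matrix("predictions")  # (batch_size, num_classes)
targets = T.ivector("targets")         # (batch_size,)
k = 2

top_k = T.argsort(predictions)[:, -k:]  # indices of the k largest scores per row
hits, _ = theano.map(
    lambda row, target: T.any(T.eq(row, target)),
    sequences=[top_k, targets])
in_top_k_fn = theano.function([predictions, targets], hits)

scores = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]], dtype=theano.config.floatX)
print(in_top_k_fn(scores, np.array([2, 2], dtype="int32")))  # [1 0]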
Example #7
Source File: theano_backend.py From keras-lambda with MIT License | 5 votes |
def map_fn(fn, elems, name=None, dtype=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on fn
    """
    return theano.map(fn, elems, name=name)[0]
Example #8
Source File: theano_backend.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 5 votes |
def map_fn(fn, elems, name=None, dtype=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on fn
    """
    return theano.map(fn, elems, name=name)[0]
Example #9
Source File: BU_RvNN.py From Rumor_RvNN with MIT License | 5 votes |
def compute_tree(self, x_word, x_index, tree):
    self.recursive_unit = self.create_recursive_unit()
    self.leaf_unit = self.create_leaf_unit()
    num_parents = tree.shape[0]  # num internal nodes
    num_leaves = self.num_nodes - num_parents

    # compute leaf hidden states
    leaf_h, _ = theano.map(
        fn=self.leaf_unit,
        sequences=[x_word[:num_leaves], x_index[:num_leaves]])
    if self.irregular_tree:
        init_node_h = T.concatenate([leaf_h, leaf_h, leaf_h], axis=0)
    else:
        init_node_h = leaf_h

    # use recurrence to compute internal node hidden states
    def _recurrence(x_word, x_index, node_info, t, node_h, last_h):
        child_exists = node_info > -1
        offset = 2 * num_leaves * int(self.irregular_tree) - child_exists * t  ### offset???
        child_h = node_h[node_info + offset] * child_exists.dimshuffle(0, 'x')  ### transpose??
        parent_h = self.recursive_unit(x_word, x_index, child_h, child_exists)
        node_h = T.concatenate([node_h, parent_h.reshape([1, self.hidden_dim])])
        return node_h[1:], parent_h

    dummy = theano.shared(self.init_vector([self.hidden_dim]))
    (_, parent_h), _ = theano.scan(
        fn=_recurrence,
        outputs_info=[init_node_h, dummy],
        sequences=[x_word[num_leaves:], x_index[num_leaves:], tree, T.arange(num_parents)],
        n_steps=num_parents)

    return T.concatenate([leaf_h, parent_h], axis=0)
Example #10
Source File: theano_backend.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def map_fn(fn, elems, name=None, dtype=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on fn
    """
    return theano.map(fn, elems, name=name)[0]
Example #11
Source File: theano_backend.py From GraphicDesignPatternByPython with MIT License | 5 votes |
def map_fn(fn, elems, name=None, dtype=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on fn
    """
    return theano.map(fn, elems, name=name)[0]
Example #12
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 5 votes |
def map_fn(fn, elems, name=None):
    '''Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on fn
    '''
    return theano.map(fn, elems, name=name)[0]
Example #13
Source File: dcnn_train.py From twitter-sent-dnn with MIT License | 4 votes |
def __init__(self, rng, input, vocab_size, embed_dm, embeddings=None):
    """
    input: theano.tensor.dmatrix, (number of instances, sentence word number)
    vocab_size: integer, the size of vocabulary
    embed_dm: integer, the dimension of word vector representation
    embeddings: theano.tensor.TensorType pretrained embeddings
    """
    if embeddings:
        print "Use pretrained embeddings: ON"
        assert embeddings.get_value().shape == (vocab_size, embed_dm), "%r != %r" % (
            embeddings.get_value().shape, (vocab_size, embed_dm))
        self.embeddings = embeddings
    else:
        print "Use pretrained embeddings: OFF"
        embedding_val = np.asarray(
            rng.normal(0, 0.05, size=(vocab_size, embed_dm)),
            dtype=theano.config.floatX)
        embedding_val[vocab_size - 1, :] = 0  # the <PADDING> character is initialized to 0
        self.embeddings = theano.shared(
            np.asarray(embedding_val, dtype=theano.config.floatX),
            borrow=True,
            name='embeddings')

    self.params = [self.embeddings]
    self.param_shapes = [(vocab_size, embed_dm)]

    # Return:
    # :type, theano.tensor.tensor4
    # :param, dimension(1, 1, word embedding dimension, number of words in sentence)
    # made to be 4D to fit into the dimension of convolution operation
    sent_embedding_list, updates = theano.map(lambda sent: self.embeddings[sent], input)
    sent_embedding_tensor = T.stacklists(sent_embedding_list)  # make it into a 3D tensor
    self.output = sent_embedding_tensor.dimshuffle(0, 'x', 2, 1)  # make it a 4D tensor
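The theano.map call at the end is the part relevant to this page: each row of word indices is mapped to the corresponding rows of a shared embedding matrix. A stripped-down sketch of just that lookup (variable names are illustrative, not from twitter-sent-dnn):

import numpy as np
import theano
import theano.tensor as T

vocab_size, embed_dm = 10, 4
embeddings = theano.shared(
    np.random.randn(vocab_size, embed_dm).astype(theano.config.floatX),
    name="embeddings")

sentences = T.imatrix("sentences")  # (num_sentences, words_per_sentence)
sent_embeddings, _ = theano.map(lambda sent: embeddings[sent], sequences=[sentences])
lookup = theano.function([sentences], sent_embeddings)

batch = np.array([[0, 1, 2], [3, 4, 9]], dtype="int32")
print(lookup(batch).shape)  # (2, 3, 4): sentences x words x embedding dimension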
Example #14
Source File: helper.py From DeepSequence with MIT License | 4 votes |
def gen_full_alignment(self):

    # Get only the focus columns
    for seq_name, sequence in self.seq_name_to_sequence.items():
        # Replace periods with dashes (the uppercase equivalent)
        sequence = sequence.replace(".", "-")
        # then get only the focus columns
        self.seq_name_to_sequence[seq_name] = [sequence[ix].upper() for ix in self.focus_cols]

    # Remove sequences that have bad characters
    alphabet_set = set(list(self.alphabet))
    seq_names_to_remove = []
    for seq_name, sequence in self.seq_name_to_sequence.items():
        for letter in sequence:
            if letter not in alphabet_set and letter != "-":
                seq_names_to_remove.append(seq_name)

    seq_names_to_remove = list(set(seq_names_to_remove))
    for seq_name in seq_names_to_remove:
        del self.seq_name_to_sequence[seq_name]

    # Encode the sequences
    print("Encoding sequences")
    self.x_train = np.zeros((len(self.seq_name_to_sequence.keys()), len(self.focus_cols), len(self.alphabet)))
    self.x_train_name_list = []
    for i, seq_name in enumerate(self.seq_name_to_sequence.keys()):
        sequence = self.seq_name_to_sequence[seq_name]
        self.x_train_name_list.append(seq_name)
        for j, letter in enumerate(sequence):
            if letter in self.aa_dict:
                k = self.aa_dict[letter]
                self.x_train[i, j, k] = 1.0

    # Fast sequence weights with Theano
    if self.calc_weights:
        print("Computing sequence weights")
        # Numpy version
        # import scipy
        # from scipy.spatial.distance import pdist, squareform
        # self.weights = scale / np.sum(squareform(pdist(seq_index_array, metric="hamming")) < theta, axis=0)
        #
        # Theano weights
        X = T.tensor3("x")
        cutoff = T.scalar("theta")
        X_flat = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))
        N_list, updates = theano.map(
            lambda x: 1.0 / T.sum(T.dot(X_flat, x) / T.dot(x, x) > 1 - cutoff), X_flat)
        weightfun = theano.function(inputs=[X, cutoff], outputs=[N_list], allow_input_downcast=True)
        #
        self.weights = weightfun(self.x_train, self.theta)[0]
    else:
        # If not using weights, use an isotropic weight matrix
        self.weights = np.ones(self.x_train.shape[0])

    self.Neff = np.sum(self.weights)
    print("Neff =", str(self.Neff))
    print("Data Shape =", self.x_train.shape)
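The weighting step can be read in isolation: for each flattened one-hot sequence, theano.map counts how many sequences overlap with it by more than 1 - theta and weights the sequence by the reciprocal of that count. A small self-contained sketch of just that computation on toy data (not the DeepSequence pipeline):

import numpy as np
import theano
import theano.tensor as T

X_flat = T.matrix("x_flat")  # (num_sequences, flattened one-hot length)
theta = T.scalar("theta")

# For every row x, count the rows whose normalized overlap exceeds 1 - theta,
# then weight that row by the reciprocal of the count.
weights, _ = theano.map(
    lambda x: 1.0 / T.sum(T.dot(X_flat, x) / T.dot(x, x) > 1 - theta),
    sequences=[X_flat])
weightfun = theano.function([X_flat, theta], weights, allow_input_downcast=True)

one_hot = np.eye(3, dtype=theano.config.floatX)[[0, 0, 1]].reshape(3, -1)
print(weightfun(one_hot, 0.2))  # roughly [0.5 0.5 1.0]: the two identical rows share weight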
Example #15
Source File: TD_RvNN.py From Rumor_RvNN with MIT License | 4 votes |
def compute_tree(self, x_word, x_index, num_parent, tree):
    self.recursive_unit = self.create_recursive_unit()

    def ini_unit(x):
        return theano.shared(self.init_vector([self.hidden_dim]))

    init_node_h, _ = theano.scan(
        fn=ini_unit,
        sequences=[x_word])

    '''Earlier (bottom-up) version kept by the author, using theano.map for the leaves:
    self.recursive_unit = self.create_recursive_unit()
    self.leaf_unit = self.create_leaf_unit()
    num_parents = tree.shape[0]  # num internal nodes
    num_leaves = self.num_nodes - num_parents

    # compute leaf hidden states
    leaf_h, _ = theano.map(
        fn=self.leaf_unit,
        sequences=[x_word[:num_leaves], x_index[:num_leaves]])
    if self.irregular_tree:
        init_node_h = T.concatenate([leaf_h, leaf_h, leaf_h], axis=0)
    else:
        init_node_h = leaf_h'''

    # use recurrence to compute internal node hidden states
    def _recurrence(x_word, x_index, node_info, node_h, last_h):
        parent_h = node_h[node_info[0]]
        child_h = self.recursive_unit(x_word, x_index, parent_h)
        node_h = T.concatenate([node_h[:node_info[1]],
                                child_h.reshape([1, self.hidden_dim]),
                                node_h[node_info[1] + 1:]])
        return node_h, child_h

    dummy = theano.shared(self.init_vector([self.hidden_dim]))
    (_, child_hs), _ = theano.scan(
        fn=_recurrence,
        outputs_info=[init_node_h, dummy],
        sequences=[x_word[:-1], x_index, tree])

    return child_hs[num_parent - 1:]