Python theano.tensor.matrix() Examples
The following are 30 code examples of theano.tensor.matrix().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module theano.tensor, or try the search function.
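Before diving into the project examples, here is a minimal, self-contained sketch of the pattern they all share: theano.tensor.matrix() declares a symbolic 2-D variable, an expression is built from it, and theano.function compiles that expression into a callable that accepts concrete NumPy arrays. This sketch is not taken from any of the projects below; the variable names and the toy expression are illustrative only.

import numpy as np
import theano
import theano.tensor as T

# Declare symbolic matrices; shapes stay unspecified until call time.
x = T.matrix('x')
y = T.matrix('y')

# Build a symbolic expression, then compile it into a callable.
z = T.dot(x, y) + 1
f = theano.function([x, y], z, allow_input_downcast=True)

# Call the compiled function with concrete NumPy arrays.
a = np.random.rand(3, 4)
b = np.random.rand(4, 2)
print(f(a, b).shape)  # (3, 2)

Most of the examples below follow this declare/build/compile cycle, differing mainly in how the symbolic graph between the inputs and outputs is constructed.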
Example #1
Source File: test_nnet.py From D-VAE with MIT License
def test_grad(self):
    c = T.matrix()
    p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

    # test that function contains softmax and softmaxgrad
    w = T.matrix()

    backup = config.warn.sum_div_dimshuffle_bug
    config.warn.sum_div_dimshuffle_bug = False
    try:
        g = theano.function([c, w], T.grad((p_y * w).sum(), c))
        assert hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
    finally:
        config.warn.sum_div_dimshuffle_bug = backup

    g_ops = [n.op for n in g.maker.fgraph.toposort()]
    # print '--- g ='
    # printing.debugprint(g)
    # print '==='

    # The optimization is currently disabled, so the checks below are skipped.
    raise SkipTest('Optimization not enabled for the moment')
    assert len(g_ops) == 2
    assert softmax_op in g_ops
    assert softmax_grad in g_ops
    g(self.rng.rand(3, 4), self.rng.uniform(.5, 1, (3, 4)))
Example #2
Source File: SdA_train.py From adage with BSD 3-Clause "New" or "Revised" License
def return_network(self):
    '''This function returns the weight matrix and bias vectors of each
    hidden layer in the final network after training.'''
    weights_all_layer = []
    bias_all_layer = []
    bias_prime_all_layer = []
    for dA_layer in self.dA_layers:
        weight = dA_layer.W.get_value(borrow=True)
        bias = dA_layer.b.get_value(borrow=True)
        bias_prime = dA_layer.b_prime.get_value(borrow=True)
        weights_all_layer.append(weight)
        bias_all_layer.append(bias)
        bias_prime_all_layer.append(bias_prime)
    return weights_all_layer, bias_all_layer, bias_prime_all_layer
Example #3
Source File: skipthoughts.py From StackGAN with MIT License
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #4
Source File: skipthoughts.py From text-to-image with MIT License
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #5
Source File: convolutional_nn.py From Projects with MIT License
def __init__(self, convolutional_layers, feature_maps, filter_shapes, poolsize,
             feedforward_layers, feedforward_nodes, classes, learning_rate, regularization):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input, feature_maps[1], feature_maps[0], filter_shapes[0][0], filter_shapes[0][1], poolsize[0]))
    for i in range(1, convolutional_layers):
        self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1], poolsize[i]))
    self.feedforward_layers = []
    # `flattened` (the flattened feature-map size) is defined elsewhere in the source file
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2), flattened, feedforward_nodes[0]))
    for i in range(1, feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output, feedforward_nodes[i-1], feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output, feedforward_nodes[-1], classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output) - (1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    for i in range(convolutional_layers + feedforward_layers + 1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    self.gparams = [T.grad(self.cost, param) for param in self.params]
    self.propogate = theano.function([self.input, self.target], self.cost,
                                     updates=[(param, param - learning_rate*gparam)
                                              for param, gparam in zip(self.params, self.gparams)],
                                     allow_input_downcast=True)
    self.classify = theano.function([self.input], self.output, allow_input_downcast=True)
Example #6
Source File: theano_nn.py From Projects with MIT License
def __init__(self, classes, hidden_layers, features, nodes_per_hidden_layer, learning_rate, regularization):
    self.hidden_layers = []
    self.hidden_layers.append(layer(features, nodes_per_hidden_layer))
    for i in range(hidden_layers - 1):
        self.hidden_layers.append(layer(nodes_per_hidden_layer, nodes_per_hidden_layer))
    self.output_layer = layer(nodes_per_hidden_layer, classes)
    self.params = []
    for l in self.hidden_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.A = T.matrix()
    self.t = T.matrix()
    self.s = 1/(1 + T.exp(-T.dot(self.A, self.params[0]) - self.params[1]))
    for i in range(hidden_layers):
        self.s = 1/(1 + T.exp(-T.dot(self.s, self.params[2*(i+1)]) - self.params[2*(i+1)+1]))
    self.cost = -self.t*T.log(self.s) - (1-self.t)*T.log(1-self.s)
    self.cost = self.cost.mean()
    for i in range(hidden_layers + 1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    self.gparams = [T.grad(self.cost, param) for param in self.params]
    self.propogate = theano.function([self.A, self.t], self.cost,
                                     updates=[(param, param - learning_rate*gparam)
                                              for param, gparam in zip(self.params, self.gparams)],
                                     allow_input_downcast=True)
    self.classify = theano.function([self.A], self.s, allow_input_downcast=True)
Example #7
Source File: conv_net.py From Projects with MIT License
def __init__(self, convolutional_layers, feature_maps, filter_shapes, poolsize,
             feedforward_layers, feedforward_nodes, classes, regularization):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input, feature_maps[1], feature_maps[0], filter_shapes[0][0], filter_shapes[0][1], poolsize[0]))
    for i in range(1, convolutional_layers):
        self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1], poolsize[i]))
    self.feedforward_layers = []
    # `flattened` (the flattened feature-map size) is defined elsewhere in the source file
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2), flattened, feedforward_nodes[0]))
    for i in range(1, feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output, feedforward_nodes[i-1], feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output, feedforward_nodes[-1], classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output) - (1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    for i in range(convolutional_layers + feedforward_layers + 1):
        self.cost += regularization*(self.params[2*i]**2).mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.classify = theano.function([self.input], self.output, allow_input_downcast=True)
Example #8
Source File: convnet.py From Projects with MIT License
def __init__(self, convolutional_layers, feature_maps, filter_shapes,
             feedforward_layers, feedforward_nodes, classes):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input, feature_maps[1], feature_maps[0], filter_shapes[0][0], filter_shapes[0][1]))
    for i in range(1, convolutional_layers):
        if i == 2 or i == 4:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1], maxpool=(2, 2)))
        else:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1]))
    self.feedforward_layers = []
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2), 20480, feedforward_nodes[0]))
    for i in range(1, feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output, feedforward_nodes[i-1], feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output, feedforward_nodes[-1], classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output) - (1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.classify = theano.function([self.input], self.output, allow_input_downcast=True)
Example #9
Source File: preprocessing.py From Projects with MIT License
def __init__(self):
    X_in = T.matrix('X_in')
    u = T.matrix('u')
    s = T.vector('s')
    eps = T.scalar('eps')

    X_ = X_in - T.mean(X_in, 0)
    sigma = T.dot(X_.T, X_) / X_.shape[0]
    self.sigma = theano.function([X_in], sigma, allow_input_downcast=True)

    Z = T.dot(T.dot(u, T.nlinalg.diag(1. / T.sqrt(s + eps))), u.T)
    X_zca = T.dot(X_, Z.T)
    self.compute_zca = theano.function([X_in, u, s, eps], X_zca, allow_input_downcast=True)

    self._u = None
    self._s = None
Example #10
Source File: conv2d_crossvalidation.py From Projects with MIT License
def __init__(self, convolutional_layers, feature_maps, filter_shapes,
             feedforward_layers, feedforward_nodes, classes):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input, feature_maps[1], feature_maps[0], filter_shapes[0][0], filter_shapes[0][1]))
    for i in range(1, convolutional_layers):
        if i == 3:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1], maxpool=(2, 2)))
        else:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1]))
    self.feedforward_layers = []
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2), 40000, feedforward_nodes[0]))
    for i in range(1, feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output, feedforward_nodes[i-1], feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output, feedforward_nodes[-1], classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output) - (1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.classify = theano.function([self.input], self.output, allow_input_downcast=True)
Example #11
Source File: conv2d_predict.py From Projects with MIT License
def __init__(self, convolutional_layers, feature_maps, filter_shapes,
             feedforward_layers, feedforward_nodes, classes):
    self.input = T.tensor4()
    self.convolutional_layers = []
    self.convolutional_layers.append(convolutional_layer(self.input, feature_maps[1], feature_maps[0], filter_shapes[0][0], filter_shapes[0][1]))
    for i in range(1, convolutional_layers):
        if i == 3:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1], maxpool=(2, 2)))
        else:
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output, feature_maps[i+1], feature_maps[i], filter_shapes[i][0], filter_shapes[i][1]))
    self.feedforward_layers = []
    self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2), 40000, feedforward_nodes[0]))
    for i in range(1, feedforward_layers):
        self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output, feedforward_nodes[i-1], feedforward_nodes[i]))
    self.output_layer = feedforward_layer(self.feedforward_layers[-1].output, feedforward_nodes[-1], classes)
    self.params = []
    for l in self.convolutional_layers + self.feedforward_layers:
        self.params.extend(l.get_params())
    self.params.extend(self.output_layer.get_params())
    self.target = T.matrix()
    self.output = self.output_layer.output
    self.cost = -self.target*T.log(self.output) - (1-self.target)*T.log(1-self.output)
    self.cost = self.cost.mean()
    self.updates = self.adam(self.cost, self.params)
    self.propogate = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.classify = theano.function([self.input], self.output, allow_input_downcast=True)
Example #12
Source File: rbm_pretraining.py From Projects with MIT License
def __init__(self, rbm1, rbm2, rbm3, rbm4):
    self.learning_rate = 0.01
    self.W1 = rbm1.W
    self.W2 = rbm2.W
    self.W3 = rbm3.W
    self.W4 = rbm4.W
    self.W5 = theano.shared(self.ortho_weight(1000, 10), borrow=True)
    self.b1 = rbm1.hbias
    self.b2 = rbm2.hbias
    self.b3 = rbm3.hbias
    self.b4 = rbm4.hbias
    self.b5 = theano.shared(np.zeros((10,), dtype=theano.config.floatX), borrow=True)
    self.input = T.matrix()
    self.target = T.matrix()
    self.l1out = T.nnet.sigmoid(T.dot(self.input, self.W1) + self.b1)
    self.l2out = T.nnet.sigmoid(T.dot(self.l1out, self.W2) + self.b2)
    self.l3out = T.nnet.sigmoid(T.dot(self.l2out, self.W3) + self.b3)
    self.l4out = T.nnet.sigmoid(T.dot(self.l3out, self.W4) + self.b4)
    self.output = T.nnet.softmax(T.dot(self.l4out, self.W5) + self.b5)
    self.cost = T.nnet.categorical_crossentropy(self.output, self.target).mean()
    self.params = [self.W1, self.W2, self.W3, self.W4, self.W5,
                   self.b1, self.b2, self.b3, self.b4, self.b5]
    self.updates = self.adam(self.cost, self.params)
    self.train_f = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.predict_f = theano.function([self.input], self.output, allow_input_downcast=True)
Example #13
Source File: residual_gradient_descent.py From Projects with MIT License
def __init__(self, hidden_layers, layer_nodes):
    self.input = T.matrix()
    self.target = T.matrix()
    self.W = []
    self.b = []
    self.activations = []
    self.W.append(theano.shared(self.ortho_weight(784, layer_nodes), borrow=True))
    self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX), borrow=True))
    self.activations.append(T.nnet.sigmoid(T.dot(self.input, self.W[-1]) + self.b[-1]))
    self.residuals = self.activations[-1].copy()
    for layer in range(hidden_layers - 1):
        self.W.append(theano.shared(self.ortho_weight(layer_nodes, layer_nodes), borrow=True))
        self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX), borrow=True))
        self.activations.append(T.nnet.sigmoid(T.dot(self.residuals, self.W[-1]) + self.b[-1]))
        self.residuals += self.activations[-1]
    self.W.append(theano.shared(self.ortho_weight(layer_nodes, 10), borrow=True))
    self.b.append(theano.shared(np.zeros((10,), dtype=theano.config.floatX), borrow=True))
    self.activations.append(T.nnet.softmax(T.dot(self.residuals, self.W[-1]) + self.b[-1]))
    self.cost = T.nnet.categorical_crossentropy(self.activations[-1], self.target).mean()
    self.params = self.W + self.b
    self.updates = self.adam(self.cost, self.params)
    self.train_f = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.predict_f = theano.function([self.input], self.activations[-1], allow_input_downcast=True)
Example #14
Source File: gradient_descent.py From Projects with MIT License
def __init__(self, hidden_layers, layer_nodes):
    self.input = T.matrix()
    self.target = T.matrix()
    self.W = []
    self.b = []
    self.activations = []
    self.W.append(theano.shared(self.ortho_weight(784, layer_nodes), borrow=True))
    self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX), borrow=True))
    self.activations.append(T.nnet.sigmoid(T.dot(self.input, self.W[-1]) + self.b[-1]))
    for layer in range(hidden_layers - 1):
        self.W.append(theano.shared(self.ortho_weight(layer_nodes, layer_nodes), borrow=True))
        self.b.append(theano.shared(np.zeros((layer_nodes,), dtype=theano.config.floatX), borrow=True))
        self.activations.append(T.nnet.sigmoid(T.dot(self.activations[-1], self.W[-1]) + self.b[-1]))
    self.W.append(theano.shared(self.ortho_weight(layer_nodes, 10), borrow=True))
    self.b.append(theano.shared(np.zeros((10,), dtype=theano.config.floatX), borrow=True))
    self.activations.append(T.nnet.softmax(T.dot(self.activations[-1], self.W[-1]) + self.b[-1]))
    self.cost = T.nnet.categorical_crossentropy(self.activations[-1], self.target).mean()
    self.params = self.W + self.b
    self.updates = self.adam(self.cost, self.params)
    self.train_f = theano.function([self.input, self.target], self.cost, updates=self.updates, allow_input_downcast=True)
    self.predict_f = theano.function([self.input], self.activations[-1], allow_input_downcast=True)
Example #15
Source File: aa.py From D-VAE with MIT License
def __init__(self):
    super(M, self).__init__()
    x = T.matrix('x')  # input, target
    self.w = module.Member(T.matrix('w'))  # weights
    self.a = module.Member(T.vector('a'))  # hid bias
    self.b = module.Member(T.vector('b'))  # output bias
    self.hid = T.tanh(T.dot(x, self.w) + self.a)
    hid = self.hid
    self.out = T.tanh(T.dot(hid, self.w.T) + self.b)
    out = self.out
    self.err = 0.5 * T.sum((out - x)**2)
    err = self.err
    params = [self.w, self.a, self.b]
    gparams = T.grad(err, params)
    updates = [(p, p - 0.01 * gp) for p, gp in zip(params, gparams)]
    self.step = module.Method([x], err, updates=dict(updates))
Example #16
Source File: test_opt.py From D-VAE with MIT License
def test_local_mul_s_d():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_mul_s_d")

    for sp_format in sparse.sparse_formats:
        inputs = [getattr(theano.sparse, sp_format + '_matrix')(),
                  tensor.matrix()]

        f = theano.function(inputs, sparse.mul_s_d(*inputs), mode=mode)

        assert not any(isinstance(node.op, sparse.MulSD) for node
                       in f.maker.fgraph.toposort())
Example #17
Source File: test_basic.py From D-VAE with MIT License
def __generalized_sd_test(self, theanop, symbolicType, testOp, scipyType):
    scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
    if scipy_ver < [0, 13]:
        raise SkipTest("comparison operators need newer release of scipy")

    x = symbolicType()
    y = theano.tensor.matrix()

    op = theanop(x, y)
    f = theano.function([x, y], op)

    m1 = scipyType(random_lil((10, 40), config.floatX, 3))
    m2 = self._rand_ranged(1000, -1000, [10, 40])

    self.assertTrue(numpy.array_equal(f(m1, m2).data, testOp(m1, m2).data))
Example #18
Source File: test_basic.py From D-VAE with MIT License
def test_equality_case(self):
    """
    Test assuring normal behaviour when values in the matrices are equal.
    """
    scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]
    if scipy_ver < [0, 13]:
        raise SkipTest("comparison operators need newer release of scipy")

    x = sparse.csc_matrix()
    y = theano.tensor.matrix()

    m1 = sp.csc_matrix((2, 2), dtype=theano.config.floatX)
    m2 = numpy.asarray([[0, 0], [0, 0]], dtype=theano.config.floatX)

    for func in self.testsDic:
        op = func(y, x)
        f = theano.function([y, x], op)

        self.assertTrue(numpy.array_equal(f(m2, m1),
                                          self.testsDic[func](m2, m1)))
Example #19
Source File: test_basic.py From D-VAE with MIT License
def test_dot_sparse_sparse(self):
    # test dot for 2 input sparse matrix
    sparse_dtype = 'float64'
    sp_mat = {'csc': sp.csc_matrix,
              'csr': sp.csr_matrix,
              'bsr': sp.csr_matrix}

    for sparse_format_a in ['csc', 'csr', 'bsr']:
        for sparse_format_b in ['csc', 'csr', 'bsr']:
            a = SparseType(sparse_format_a, dtype=sparse_dtype)()
            b = SparseType(sparse_format_b, dtype=sparse_dtype)()
            d = theano.dot(a, b)
            f = theano.function([a, b], theano.Out(d, borrow=True))
            topo = f.maker.fgraph.toposort()
            for M, N, K, nnz in [(4, 3, 2, 3),
                                 (40, 30, 20, 3),
                                 (40, 30, 20, 30),
                                 (400, 3000, 200, 6000),
                                 ]:
                a_val = sp_mat[sparse_format_a](
                    random_lil((M, N), sparse_dtype, nnz))
                b_val = sp_mat[sparse_format_b](
                    random_lil((N, K), sparse_dtype, nnz))
                f(a_val, b_val)
Example #20
Source File: test_basic.py From D-VAE with MIT License
def test_csr_dense(self):
    x = theano.sparse.csr_matrix('x')
    y = theano.tensor.matrix('y')
    v = theano.tensor.vector('v')

    for (x, y, x_v, y_v) in [(x, y, self.x_csr, self.y),
                             (x, v, self.x_csr, self.v_100),
                             (v, x, self.v_10, self.x_csr)]:
        f_a = theano.function([x, y], theano.sparse.dot(x, y))
        f_b = lambda x, y: x * y

        utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

        # Test infer_shape
        self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                [x_v, y_v],
                                (Dot, Usmm, UsmmCscDense))
Example #21
Source File: test_basic.py From D-VAE with MIT License
def test_csc_dense(self):
    x = theano.sparse.csc_matrix('x')
    y = theano.tensor.matrix('y')
    v = theano.tensor.vector('v')

    for (x, y, x_v, y_v) in [(x, y, self.x_csc, self.y),
                             (x, v, self.x_csc, self.v_100),
                             (v, x, self.v_10, self.x_csc)]:
        f_a = theano.function([x, y], theano.sparse.dot(x, y))
        f_b = lambda x, y: x * y

        utt.assert_allclose(f_a(x_v, y_v), f_b(x_v, y_v))

        # Test infer_shape
        self._compile_and_check([x, y], [theano.sparse.dot(x, y)],
                                [x_v, y_v],
                                (Dot, Usmm, UsmmCscDense))
Example #22
Source File: test_basic.py From D-VAE with MIT License
def test_int32_dtype(self):
    # Reported on the theano-user mailing-list:
    # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
    size = 9
    intX = 'int32'

    C = tensor.matrix('C', dtype=intX)
    I = tensor.matrix('I', dtype=intX)

    fI = I.flatten()
    data = tensor.ones_like(fI)
    indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

    m1 = sparse.CSR(data, fI, indptr, (8, size))
    m2 = sparse.dot(m1, C)
    y = m2.reshape(shape=(2, 4, 9), ndim=3)

    f = theano.function(inputs=[I, C], outputs=y)

    i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
    a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                      dtype=intX)
    f(i, a)
Example #23
Source File: test_basic.py From D-VAE with MIT License
def test_sparse_shared_memory():
    # Note : There are no inplace ops on sparse matrix yet. If one is
    # someday implemented, we could test it here.
    a = random_lil((3, 4), 'float32', 3).tocsr()
    m1 = random_lil((4, 4), 'float32', 3).tocsr()
    m2 = random_lil((4, 4), 'float32', 3).tocsr()

    x = SparseType('csr', dtype='float32')()
    y = SparseType('csr', dtype='float32')()

    sdot = theano.sparse.structured_dot
    z = sdot(x * 3, m1) + sdot(y * 2, m2)

    f = theano.function([theano.In(x, mutable=True),
                         theano.In(y, mutable=True)],
                        z, mode='FAST_RUN')

    def f_(x, y, m1=m1, m2=m2):
        return ((x * 3) * m1) + ((y * 2) * m2)

    assert SparseType.may_share_memory(a, a)  # This is trivial

    result = f(a, a)
    result_ = f_(a, a)
    assert (result_.todense() == result.todense()).all()
Example #24
Source File: test_basic.py From D-VAE with MIT License
def test_size():
    """
    Ensure the `size` attribute of sparse matrices behaves as in numpy.
    """
    for sparse_type in ('csc_matrix', 'csr_matrix'):
        x = getattr(theano.sparse, sparse_type)()
        y = getattr(scipy.sparse, sparse_type)((5, 7)).astype(config.floatX)
        get_size = theano.function([x], x.size)

        def check():
            assert y.size == get_size(y)

        # We verify that the size is correctly updated as we store more data
        # into the sparse matrix (including zeros).
        check()
        y[0, 0] = 1
        check()
        y[0, 1] = 0
        check()
Example #25
Source File: test_basic.py From D-VAE with MIT License
def structure_function(f, index=0):
    """Decorator to structure a function which applies to a dense matrix.

    Here, the inputs of the function must be dense matrices. The sparse
    pattern is determined by finding the zeros.

    :param index: The index of the parameter from which the function must
                  be structured.

    :return: The structured function for its `index` parameter.
    """

    def structured_function(*args):
        pattern = args[index]
        evaluated = f(*args)
        evaluated[pattern == 0] = 0
        return evaluated

    return structured_function
Example #26
Source File: test_nnet.py From D-VAE with MIT License
def test_basic(self):
    c = T.matrix()
    p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

    # test that function contains softmax and no div.
    f = theano.function([c], p_y, mode=self.mode)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    f_ops = [n.op for n in f.maker.fgraph.toposort()]
    # print '--- f ='
    # printing.debugprint(f)
    # print '==='
    assert len(f_ops) == 1
    assert softmax_op in f_ops
    f(self.rng.rand(3, 4).astype(config.floatX))
Example #27
Source File: test_nnet.py From D-VAE with MIT License
def test_basic_keepdims(self):
    c = T.matrix()
    p_y = T.exp(c) / T.exp(c).sum(axis=1, keepdims=True)

    # test that function contains softmax and no div.
    f = theano.function([c], p_y, mode=self.mode)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    f_ops = [n.op for n in f.maker.fgraph.toposort()]
    # print '--- f ='
    # printing.debugprint(f)
    # print '==='
    assert len(f_ops) == 1
    assert softmax_op in f_ops
    f(self.rng.rand(3, 4).astype(config.floatX))
Example #28
Source File: test_opt.py From D-VAE with MIT License
def test_local_sampling_dot_csr():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_sampling_dot_csr")

    for sp_format in ['csr']:  # Not implemented for other format
        inputs = [tensor.matrix(),
                  tensor.matrix(),
                  getattr(theano.sparse, sp_format + '_matrix')()]

        f = theano.function(inputs,
                            sparse.sampling_dot(*inputs),
                            mode=mode)

        if theano.config.blas.ldflags:
            assert not any(isinstance(node.op, sparse.SamplingDot) for node
                           in f.maker.fgraph.toposort())
        else:
            # SamplingDotCSR's C implementation needs blas, so it should not
            # be inserted
            assert not any(isinstance(node.op, sparse.opt.SamplingDotCSR)
                           for node in f.maker.fgraph.toposort())
Example #29
Source File: test_nnet.py From D-VAE with MIT License
def test_infer_shape(self):
    admat = matrix()
    admat_val = numpy.random.rand(3, 4).astype(config.floatX)
    self._compile_and_check([admat], [Softmax()(admat)],
                            [admat_val], Softmax)
Example #30
Source File: test_nnet.py From D-VAE with MIT License
def test_neg_idx(self):
    admat = matrix()
    advec = vector()
    alvec = lvector()
    rng = numpy.random.RandomState(utt.fetch_seed())
    admat_val = rng.rand(10, 5).astype(config.floatX)
    admat_val /= admat_val.sum(axis=1).reshape(10, 1)
    advec_val = rng.rand(10).astype(config.floatX)
    alvec_val = rng.randint(low=0, high=5, size=10)
    alvec_val[1] = -1
    out = CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)
    f = theano.function([advec, admat, alvec], out)
    self.assertRaises(ValueError, f, advec_val, admat_val, alvec_val)