Python theano.tensor.tensor3() Examples
The following are 30 code examples of theano.tensor.tensor3(). Each example is taken from an open-source project; the project, source file, and license are noted above each listing. You may also want to check out the other available functions and classes of the theano.tensor module.
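As a warm-up before the project examples, here is a minimal sketch of what tensor3() gives you (assuming only Theano and NumPy): it declares a symbolic 3-D variable, which you can then use to build and compile a function. tensor3() fixes only the rank and dtype; the meaning of the axes (in the examples below, usually time steps, batch, and features) is up to the caller.

import numpy as np
import theano
import theano.tensor as T

# A minimal sketch: tensor3() declares a symbolic 3-D variable
# with no data attached yet.
x = T.tensor3('x')
f = theano.function([x], x.sum(axis=0))  # sum over the leading axis

data = np.ones((4, 2, 3), dtype=theano.config.floatX)
print(f(data).shape)  # (2, 3)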
Example #1
Source File: utils.py From RaptorX-Contact with GNU General Public License v3.0
def Compatible(list1, list2):
    if len(list1) != len(list2):
        return False
    for l1, l2 in zip(list1, list2):
        if type(l1.get_value()) != type(l2):
            return False
        if np.isscalar(l1.get_value()):
            continue
        if l1.get_value().shape != l2.shape:
            return False
    return True

## generate the tile of a small tensor x, the first 2 dims will be expanded
## x is a small matrix or tensor3 to be tiled, y is a tuple of 2 elements
## This function generates a tile of x by copying it y*y times
## The resultant matrix shall have dimension (x.shape[0]*y, x.shape[1]*y), consisting of y*y copies of x
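The tiling helper those trailing comments describe is not part of the snippet. As a rough sketch of the behavior they specify, Theano's built-in T.tile can produce a y-by-y grid of copies; this is an illustration only, not the RaptorX-Contact implementation:

import numpy as np
import theano
import theano.tensor as T

# Hypothetical illustration of the tiling described above:
# copy a small matrix x into a (y, y) grid of itself.
x = T.matrix('x')
y = 3  # hypothetical expansion factor
f = theano.function([x], T.tile(x, (y, y)))

a = np.arange(4).reshape(2, 2).astype(theano.config.floatX)
out = f(a)
print(out.shape)  # (6, 6): y*y copies of the 2x2 block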
Example #2
Source File: test_retain.py From retain with BSD 3-Clause "New" or "Revised" License
def build_model(tparams, options):
    alphaHiddenDimSize = options['alphaHiddenDimSize']
    betaHiddenDimSize = options['betaHiddenDimSize']

    x = T.tensor3('x', dtype=config.floatX)

    reverse_emb_t = x[::-1]
    reverse_h_a = gru_layer(tparams, reverse_emb_t, 'a', alphaHiddenDimSize)[::-1] * 0.5
    reverse_h_b = gru_layer(tparams, reverse_emb_t, 'b', betaHiddenDimSize)[::-1] * 0.5

    preAlpha = T.dot(reverse_h_a, tparams['w_alpha']) + tparams['b_alpha']
    preAlpha = preAlpha.reshape((preAlpha.shape[0], preAlpha.shape[1]))
    alpha = (T.nnet.softmax(preAlpha.T)).T

    beta = T.tanh(T.dot(reverse_h_b, tparams['W_beta']) + tparams['b_beta'])

    return x, alpha, beta
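A note on the reversal trick above: for a tensor3, the slice x[::-1] flips only the leading axis (time, in RETAIN's layout), leaving the batch and feature axes untouched. A minimal check, assuming only Theano and NumPy:

import numpy as np
import theano
import theano.tensor as T

# [::-1] on a tensor3 reverses only axis 0; the other axes are untouched.
x = T.tensor3('x')
rev = theano.function([x], x[::-1])

a = np.arange(24).reshape(4, 3, 2).astype(theano.config.floatX)
assert np.allclose(rev(a), a[::-1, :, :])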
Example #3
Source File: recurrent.py From CAPTCHA-breaking with MIT License
def __init__(self, input_dim, output_dim,
             init='glorot_uniform', inner_init='orthogonal', activation='sigmoid',
             weights=None, truncate_gradient=-1, return_sequences=False):
    super(SimpleRNN, self).__init__()
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.truncate_gradient = truncate_gradient
    self.activation = activations.get(activation)
    self.return_sequences = return_sequences
    self.input = T.tensor3()

    self.W = self.init((self.input_dim, self.output_dim))
    self.U = self.inner_init((self.output_dim, self.output_dim))
    self.b = shared_zeros((self.output_dim))
    self.params = [self.W, self.U, self.b]

    if weights is not None:
        self.set_weights(weights)
Example #4
Source File: skipthoughts.py From StackGAN with MIT License
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #5
Source File: skipthoughts.py From TAC-GAN with GNU General Public License v3.0
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #6
Source File: skipthoughts.py From text-to-image with MIT License
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #7
Source File: test_recurrent.py From attention-lvcsr with MIT License
def test_super_in_recurrent_overrider():
    # A regression test for the issue #475
    class SimpleRecurrentWithContext(SimpleRecurrent):
        @application(contexts=['context'])
        def apply(self, context, *args, **kwargs):
            kwargs['inputs'] += context
            return super(SimpleRecurrentWithContext, self).apply(*args, **kwargs)

        @apply.delegate
        def apply_delegate(self):
            return super(SimpleRecurrentWithContext, self).apply

    brick = SimpleRecurrentWithContext(100, Tanh())
    inputs = tensor.tensor3('inputs')
    context = tensor.matrix('context').dimshuffle('x', 0, 1)
    brick.apply(context, inputs=inputs)
Example #8
Source File: skipthoughts.py From text-to-image with MIT License
def build_encoder_bi(tparams, options):
    """
    build bidirectional encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    embeddingr = embedding[::-1]
    x_mask = tensor.matrix('x_mask', dtype='float32')
    xr_mask = x_mask[::-1]

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    projr = get_layer(options['encoder'])[1](tparams, embeddingr, options,
                                             prefix='encoder_r', mask=xr_mask)

    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)

    return embedding, x_mask, ctx

# some utilities
Example #9
Source File: test_recurrent.py From attention-lvcsr with MIT License
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    x = tensor.tensor3()
    recurrent = SimpleRecurrent(dim=3, activation=Tanh())
    y = recurrent.apply(x)

    application_call = get_application_call(y)
    assert application_call.inner_inputs
    assert application_call.inner_outputs

    cg = ComputationGraph(application_call.inner_outputs)
    # Check that the inner scan graph is annotated
    # with `recurrent.apply`
    assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3
    # Check that the inner graph is equivalent to the one
    # produced by a stand-alone of `recurrent.apply`
    assert is_same_graph(application_call.inner_outputs[0],
                         recurrent.apply(*application_call.inner_inputs,
                                         iterate=False))
Example #10
Source File: test_recurrent.py From attention-lvcsr with MIT License
def test(self):
    x = tensor.tensor3('x')
    mask = tensor.matrix('mask')
    calc_bidir = theano.function([x, mask],
                                 [self.bidir.apply(x, mask=mask)])
    calc_simple = theano.function([x, mask],
                                  [self.simple.apply(x, mask=mask)])
    h_bidir = calc_bidir(self.x_val, self.mask_val)[0]
    h_simple = calc_simple(self.x_val, self.mask_val)[0]
    h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]

    output_names = self.bidir.apply.outputs
    assert output_names == ['states']
    assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)
    assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)
Example #11
Source File: test_extra_ops.py From D-VAE with MIT License
def test_perform(self):
    x = tensor.matrix()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    for shp in [(8, 8), (5, 8), (8, 5)]:
        a = numpy.random.rand(*shp).astype(config.floatX)
        val = numpy.cast[config.floatX](numpy.random.rand())
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert numpy.allclose(numpy.diag(out), val)
        assert (out == val).sum() == min(a.shape)

    # test for 3d tensor
    a = numpy.random.rand(3, 3, 3).astype(config.floatX)
    x = tensor.tensor3()
    y = tensor.scalar()
    f = function([x, y], fill_diagonal(x, y))
    val = numpy.cast[config.floatX](numpy.random.rand() + 10)
    out = f(a, val)
    # We can't use numpy.fill_diagonal as it is bugged.
    assert out[0, 0, 0] == val
    assert out[1, 1, 1] == val
    assert out[2, 2, 2] == val
    assert (out == val).sum() == min(a.shape)
Example #12
Source File: test_basic.py From attention-lvcsr with MIT License
def test_correct_answer(self):
    a = T.matrix()
    b = T.matrix()

    x = T.tensor3()
    y = T.tensor3()

    A = numpy.cast[theano.config.floatX](numpy.random.rand(5, 3))
    B = numpy.cast[theano.config.floatX](numpy.random.rand(7, 2))
    X = numpy.cast[theano.config.floatX](numpy.random.rand(5, 6, 1))
    Y = numpy.cast[theano.config.floatX](numpy.random.rand(1, 9, 3))

    make_list((3., 4.))
    c = make_list((a, b))
    z = make_list((x, y))
    fc = theano.function([a, b], c)
    fz = theano.function([x, y], z)
    # all(...) forces the generators to be evaluated; a bare generator
    # passed to assertTrue is always truthy.
    self.assertTrue(all((m == n).all() for m, n in zip(fc(A, B), [A, B])))
    self.assertTrue(all((m == n).all() for m, n in zip(fz(X, Y), [X, Y])))
Example #13
Source File: test_opt.py From D-VAE with MIT License
def setUp(self):
    super(Test_local_elemwise_alloc, self).setUp()
    self.fast_run_mode = mode_with_gpu

    # self.vec = tensor.vector('vec', dtype=dtype)
    # self.mat = tensor.matrix('mat', dtype=dtype)
    # self.tens = tensor.tensor3('tens', dtype=dtype)

    # self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
    # self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
    self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
    self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
    self.alloc_w_dep_tens = basic_ops.gpu_alloc(
        self.vec,
        self.tens.shape[0],
        self.tens.shape[1]
    )
    self.tv_wo_dep = basic_ops.gpu_alloc(self.vec, 5, 5)
    self.tm_wo_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
    self.s = tensor.iscalar('s')
    self.tv_w_dep = basic_ops.gpu_alloc(self.vec, self.s, self.s)
    self.tm_w_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
    self.row = tensor.row(dtype=self.dtype)
    self.o = basic_ops.gpu_alloc(self.row, 5, 5)
Example #14
Source File: test_opt.py From attention-lvcsr with MIT License
def setUp(self):
    super(Test_local_elemwise_alloc, self).setUp()
    self.fast_run_mode = mode_with_gpu

    # self.vec = tensor.vector('vec', dtype=dtype)
    # self.mat = tensor.matrix('mat', dtype=dtype)
    # self.tens = tensor.tensor3('tens', dtype=dtype)

    # self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
    # self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
    self.alloc_wo_dep = basic_ops.gpu_alloc(self.vec, 2, 2)
    self.alloc_w_dep = basic_ops.gpu_alloc(self.vec, *self.mat.shape)
    self.alloc_w_dep_tens = basic_ops.gpu_alloc(
        self.vec,
        self.tens.shape[0],
        self.tens.shape[1]
    )
    self.tv_wo_dep = basic_ops.gpu_alloc(self.vec, 5, 5)
    self.tm_wo_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
    self.s = tensor.iscalar('s')
    self.tv_w_dep = basic_ops.gpu_alloc(self.vec, self.s, self.s)
    self.tm_w_dep = basic_ops.gpu_alloc(self.mat, 5, 5, 5)
    self.row = tensor.row(dtype=self.dtype)
    self.o = basic_ops.gpu_alloc(self.row, 5, 5)
Example #15
Source File: test_basic.py From D-VAE with MIT License
def test_correct_answer(self):
    a = T.matrix()
    b = T.matrix()

    x = T.tensor3()
    y = T.tensor3()

    A = numpy.cast[theano.config.floatX](numpy.random.rand(5, 3))
    B = numpy.cast[theano.config.floatX](numpy.random.rand(7, 2))
    X = numpy.cast[theano.config.floatX](numpy.random.rand(5, 6, 1))
    Y = numpy.cast[theano.config.floatX](numpy.random.rand(1, 9, 3))

    make_list((3., 4.))
    c = make_list((a, b))
    z = make_list((x, y))
    fc = theano.function([a, b], c)
    fz = theano.function([x, y], z)
    # all(...) forces the generators to be evaluated; a bare generator
    # passed to assertTrue is always truthy.
    self.assertTrue(all((m == n).all() for m, n in zip(fc(A, B), [A, B])))
    self.assertTrue(all((m == n).all() for m, n in zip(fz(X, Y), [X, Y])))
Example #16
Source File: test_recurrent.py From attention-lvcsr with MIT License
def test(self):
    X = tensor.tensor3('X')
    out, H2, out_2, H = self.recurrent_example.apply(
        inputs=X, mask=None)

    x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX)

    h = H.eval({X: x_val})
    h2 = H2.eval({X: x_val})

    out_eval = out.eval({X: x_val})
    out_2_eval = out_2.eval({X: x_val})

    # This also implicitly tests that the initial states are zeros
    assert_allclose(h, x_val.cumsum(axis=0))
    assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1))
    assert_allclose(h * 10, out_eval)
    assert_allclose(h2 * 10, out_2_eval)
Example #17
Source File: test_model.py From ntm-one-shot with MIT License
def test_shape():
    input_var = T.tensor3('input')
    target_var = T.imatrix('target')
    output_var, _, _ = memory_augmented_neural_network(
        input_var, target_var,
        batch_size=16,
        nb_class=5,
        memory_shape=(128, 40),
        controller_size=200,
        input_size=20 * 20,
        nb_reads=4)

    posterior_fn = theano.function([input_var, target_var], output_var)

    test_input = np.random.rand(16, 50, 20 * 20)
    test_target = np.random.randint(5, size=(16, 50)).astype('int32')
    test_input_invalid_batch_size = np.random.rand(16 + 1, 50, 20 * 20)
    test_input_invalid_depth = np.random.rand(16, 50, 20 * 20 - 1)
    test_output = posterior_fn(test_input, test_target)

    assert test_output.shape == (16, 50, 5)
    with pytest.raises(ValueError) as e_info:
        posterior_fn(test_input_invalid_batch_size, test_target)
    with pytest.raises(ValueError) as e_info:
        posterior_fn(test_input_invalid_depth, test_target)
Example #18
Source File: recognizer.py From attention-lvcsr with MIT License
def __init__(self, activation, dims=None, **kwargs):
    super(SpeechBottom, self).__init__(**kwargs)
    self.num_features = self.input_dims['recordings']

    if activation is None:
        activation = Tanh()

    if dims:
        child = MLP([activation] * len(dims),
                    [self.num_features] + dims,
                    name="bottom")
        self.output_dim = child.output_dim
    else:
        child = Identity(name='bottom')
        self.output_dim = self.num_features
    self.children.append(child)

    self.mask = tensor.matrix('recordings_mask')
    self.batch_inputs = {
        'recordings': tensor.tensor3('recordings')}
    self.single_inputs = {
        'recordings': tensor.matrix('recordings')}
Example #19
Source File: Conv1d.py From RaptorX-Contact with GNU General Public License v3.0
def testConv1DLayer():
    rng = numpy.random.RandomState()
    input = T.tensor3('input')

    #windowSize = 3
    n_in = 4
    n_hiddens = [10, 10, 5]
    #convR = Conv1DR(rng, input, n_in, n_hiddens, windowSize/2)
    convLayer = Conv1DLayer(rng, input, n_in, 5, halfWinSize=1)

    #f = theano.function([input], convR.output)
    #f = theano.function([input], [convLayer.output, convLayer.out2, convLayer.convout, convLayer.out3])
    f = theano.function([input], convLayer.output)

    numOfProfiles = 6
    seqLen = 10
    profile = numpy.random.uniform(0, 1, (numOfProfiles, seqLen, n_in))

    out = f(profile)
    print(out.shape)
    print(out)
Example #20
Source File: utils.py From RaptorX-Contact with GNU General Public License v3.0
def TestMidpointFeature():
    x = T.tensor3('x')
    y = MidpointFeature(x)
    f = theano.function([x], y)

    a = np.random.uniform(0, 1, (3, 10, 2)).astype(theano.config.floatX)
    b, c = f(a)
    print(c)
    #return

    print('**********0*********')
    print(a[0])
    print(b[0][0])

    print('********4*******')
    print(a[0])
    print(b[0][4])

    print('**********9******')
    print(a[0])
    print(b[0][9])
Example #21
Source File: gram.py From gram with BSD 3-Clause "New" or "Revised" License
def build_model(tparams, leavesList, ancestorsList, options):
    dropoutRate = options['dropoutRate']
    trng = RandomStreams(123)
    use_noise = theano.shared(numpy_floatX(0.))

    x = T.tensor3('x', dtype=config.floatX)
    y = T.tensor3('y', dtype=config.floatX)
    mask = T.matrix('mask', dtype=config.floatX)
    lengths = T.vector('lengths', dtype=config.floatX)

    n_timesteps = x.shape[0]
    n_samples = x.shape[1]

    embList = []
    for leaves, ancestors in zip(leavesList, ancestorsList):
        tempAttention = generate_attention(tparams, leaves, ancestors)
        tempEmb = (tparams['W_emb'][ancestors] * tempAttention[:, :, None]).sum(axis=1)
        embList.append(tempEmb)
    emb = T.concatenate(embList, axis=0)

    x_emb = T.tanh(T.dot(x, emb))
    hidden = gru_layer(tparams, x_emb, options)
    hidden = dropout_layer(hidden, use_noise, trng, dropoutRate)
    y_hat = softmax_layer(tparams, hidden) * mask[:, :, None]

    logEps = 1e-8
    cross_entropy = -(y * T.log(y_hat + logEps) + (1. - y) * T.log(1. - y_hat + logEps))
    output_loglikelihood = cross_entropy.sum(axis=2).sum(axis=0) / lengths
    cost_noreg = T.mean(output_loglikelihood)

    if options['L2'] > 0.:
        cost = cost_noreg + options['L2'] * ((tparams['W_output'] ** 2).sum() +
                                             (tparams['W_attention'] ** 2).sum() +
                                             (tparams['v_attention'] ** 2).sum())
    else:
        # Without this branch, cost would be undefined when L2 == 0.
        cost = cost_noreg

    return use_noise, x, y, mask, lengths, cost, cost_noreg, y_hat
Example #22
Source File: test_scan_opt.py From attention-lvcsr with MIT License
def test_batch(self):
    # This runs fine. The batch size is set to something greater than 1,
    # i.e. the data is represented by a tensor3 object.
    self._run(100, 10, batch_size=5, mode=mode)
Example #23
Source File: test_extra_ops.py From attention-lvcsr with MIT License
def test_infer_shape(self):
    x = T.tensor3('x')
    a = np.random.random((3, 5, 2)).astype(config.floatX)

    # Test axis=None
    self._compile_and_check([x], [self.op(x)], [a], self.op_class)

    for axis in range(-len(a.shape), len(a.shape)):
        self._compile_and_check([x], [cumprod(x, axis=axis)], [a],
                                self.op_class)
Example #24
Source File: test_extra_ops.py From attention-lvcsr with MIT License
def test_CumprodOp(self):
    x = T.tensor3('x')
    a = np.random.random((3, 5, 2)).astype(config.floatX)

    # Test axis out of bounds
    self.assertRaises(ValueError, cumprod, x, axis=3)
    self.assertRaises(ValueError, cumprod, x, axis=-4)

    f = theano.function([x], cumprod(x))
    assert np.allclose(np.cumprod(a), f(a))  # Test axis=None

    for axis in range(-len(a.shape), len(a.shape)):
        f = theano.function([x], cumprod(x, axis=axis))
        assert np.allclose(np.cumprod(a, axis=axis), f(a))
Example #25
Source File: test_extra_ops.py From attention-lvcsr with MIT License
def test_infer_shape(self):
    x = T.tensor3('x')
    a = np.random.random((3, 5, 2)).astype(config.floatX)

    # Test axis=None
    self._compile_and_check([x], [self.op(x)], [a], self.op_class)

    for axis in range(-len(a.shape), len(a.shape)):
        self._compile_and_check([x], [cumsum(x, axis=axis)], [a],
                                self.op_class)
Example #26
Source File: test_extra_ops.py From attention-lvcsr with MIT License
def test_cumsumOp(self):
    x = T.tensor3('x')
    a = np.random.random((3, 5, 2)).astype(config.floatX)

    # Test axis out of bounds
    self.assertRaises(ValueError, cumsum, x, axis=3)
    self.assertRaises(ValueError, cumsum, x, axis=-4)

    f = theano.function([x], cumsum(x))
    assert np.allclose(np.cumsum(a), f(a))  # Test axis=None

    for axis in range(-len(a.shape), len(a.shape)):
        f = theano.function([x], cumsum(x, axis=axis))
        assert np.allclose(np.cumsum(a, axis=axis), f(a))
Example #27
Source File: skipthoughts.py From TAC-GAN with GNU General Public License v3.0
def build_encoder(tparams, options):
    """
    build an encoder, given pre-computed word embeddings
    """
    # word embedding (source)
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, embedding, options,
                                            prefix='encoder', mask=x_mask)
    ctx = proj[0][-1]

    return embedding, x_mask, ctx
Example #28
Source File: theano_utils.py From seq2seq-keyphrase with MIT License
def ndim_tensor(ndim):
    if ndim == 1:
        return T.vector()
    elif ndim == 2:
        return T.matrix()
    elif ndim == 3:
        return T.tensor3()
    elif ndim == 4:
        return T.tensor4()
    return T.matrix()

# get int32 tensor
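A brief usage sketch (hypothetical, assuming ndim_tensor is importable as defined above): the helper dispatches on the requested rank, so ndim_tensor(3) returns the same kind of symbolic variable as T.tensor3().

import numpy as np

# Hypothetical usage: pick a symbolic variable whose rank
# matches some concrete batch of data.
data = np.zeros((8, 20, 50))      # e.g. batch x time x features
var = ndim_tensor(data.ndim)      # equivalent to T.tensor3()
assert var.ndim == 3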
Example #29
Source File: blizzard_data.py From SWaveNet with MIT License
def theano_vars(self):
    return T.tensor3('x', dtype=theano.config.floatX)
Example #30
Source File: models.py From dcnn with MIT License
def __init__(self, parameters, A):
    self.params = parameters

    # Prepare indices input.
    self.var_K = T.tensor3('Apow')
    self.var_X = T.matrix('X')
    self.var_I = T.ivector('I')
    self.var_Y = T.imatrix('Y')

    self.l_in_k = lasagne.layers.InputLayer(
        (None, self.params.num_hops + 1, self.params.num_nodes),
        input_var=self.var_K)
    self.l_in_x = lasagne.layers.InputLayer(
        (self.params.num_nodes, self.params.num_features),
        input_var=self.var_X)
    self.l_indices = lasagne.layers.InputLayer(
        (None,),
        input_var=self.var_I)

    self.K = util.A_to_diffusion_kernel(A, self.params.num_hops)

    # Overridable to customize init behavior.
    self._register_model_layers()

    loss_fn = params.loss_map[self.params.loss_fn]
    update_fn = params.update_map[self.params.update_fn]

    prediction = lasagne.layers.get_output(self.l_out)
    self._loss = lasagne.objectives.aggregate(
        loss_fn(prediction, self.var_Y), mode='mean')
    model_parameters = lasagne.layers.get_all_params(self.l_out)
    self._updates = update_fn(
        self._loss, model_parameters,
        learning_rate=self.params.learning_rate)
    if self.params.momentum:
        self._updates = lasagne.updates.apply_momentum(
            self._updates, model_parameters)

    self.apply_loss_and_update = theano.function(
        [self.var_K, self.var_X, self.var_I, self.var_Y],
        self._loss, updates=self._updates)
    self.apply_loss = theano.function(
        [self.var_K, self.var_X, self.var_I, self.var_Y],
        self._loss)