Python theano.tensor.dvector() Examples
The following are 30 code examples of theano.tensor.dvector(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor.
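Before the examples, here is a minimal sketch (not taken from any of the projects listed below) of the typical pattern they all share: theano.tensor.dvector() creates a symbolic 1-D float64 vector, which is combined into an expression and compiled into a callable with theano.function.

    # Minimal usage sketch for theano.tensor.dvector(); assumes Theano is installed.
    import numpy
    import theano
    import theano.tensor as T

    x = T.dvector('x')               # symbolic 1-D vector, dtype float64
    y = T.dvector('y')
    z = x * y + x                    # build a symbolic expression
    f = theano.function([x, y], z)   # compile it into a Python callable

    print(f(numpy.array([1., 2., 3.]), numpy.array([4., 5., 6.])))
    # prints [ 5. 12. 21.]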
Example #1
Source File: test_shared_randomstreams.py From D-VAE with MIT License | 6 votes |
def test_broadcast_arguments(self):
    random = RandomStreams(utt.fetch_seed())
    low = tensor.dvector()
    high = tensor.dcol()
    out = random.uniform(low=low, high=high)
    assert out.ndim == 2
    f = function([low, high], out)

    rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
    numpy_rng = numpy.random.RandomState(int(rng_seed))
    val0 = f([-5, .5, 0, 1], [[1.]])
    val1 = f([.9], [[1.], [1.1], [1.5]])
    val2 = f([-5, .5, 0, 1], [[1.], [1.1], [1.5]])

    numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
    numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
    numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],
                                   high=[[1.], [1.1], [1.5]])

    assert numpy.all(val0 == numpy_val0)
    assert numpy.all(val1 == numpy_val1)
    assert numpy.all(val2 == numpy_val2)
Example #2
Source File: test_pfunc.py From attention-lvcsr with MIT License | 6 votes |
def test_param_strict(self):
    a = tensor.dvector()
    b = shared(7)
    out = a + b

    f = pfunc([In(a, strict=False)], [out])
    # works, rand generates float64 by default
    f(numpy.random.rand(8))
    # works, casting is allowed
    f(numpy.array([1, 2, 3, 4], dtype='int32'))

    f = pfunc([In(a, strict=True)], [out])
    try:
        # fails, f expects float64
        f(numpy.array([1, 2, 3, 4], dtype='int32'))
    except TypeError:
        pass
Example #3
Source File: test_pfunc.py From D-VAE with MIT License | 6 votes |
def test_param_mutable(self):
    a = tensor.dvector()
    a_out = a * 2  # assuming the op which makes this "in place" triggers

    # using mutable=True will let fip change the value in aval
    fip = pfunc([In(a, mutable=True)], [a_out], mode='FAST_RUN')
    aval = numpy.random.rand(10)
    aval2 = aval.copy()
    assert numpy.all(fip(aval) == (aval2 * 2))
    assert not numpy.all(aval == aval2)

    # using mutable=False should leave the input untouched
    f = pfunc([In(a, mutable=False)], [a_out], mode='FAST_RUN')
    aval = numpy.random.rand(10)
    aval2 = aval.copy()
    assert numpy.all(f(aval) == (aval2 * 2))
    assert numpy.all(aval == aval2)
Example #4
Source File: test_pfunc.py From D-VAE with MIT License | 6 votes |
def test_param_strict(self):
    a = tensor.dvector()
    b = shared(7)
    out = a + b

    f = pfunc([In(a, strict=False)], [out])
    # works, rand generates float64 by default
    f(numpy.random.rand(8))
    # works, casting is allowed
    f(numpy.array([1, 2, 3, 4], dtype='int32'))

    f = pfunc([In(a, strict=True)], [out])
    try:
        # fails, f expects float64
        f(numpy.array([1, 2, 3, 4], dtype='int32'))
    except TypeError:
        pass
Example #5
Source File: test_pfunc.py From attention-lvcsr with MIT License | 6 votes |
def test_param_mutable(self):
    a = tensor.dvector()
    a_out = a * 2  # assuming the op which makes this "in place" triggers

    # using mutable=True will let fip change the value in aval
    fip = pfunc([In(a, mutable=True)], [a_out], mode='FAST_RUN')
    aval = numpy.random.rand(10)
    aval2 = aval.copy()
    assert numpy.all(fip(aval) == (aval2 * 2))
    assert not numpy.all(aval == aval2)

    # using mutable=False should leave the input untouched
    f = pfunc([In(a, mutable=False)], [a_out], mode='FAST_RUN')
    aval = numpy.random.rand(10)
    aval2 = aval.copy()
    assert numpy.all(f(aval) == (aval2 * 2))
    assert numpy.all(aval == aval2)
Example #6
Source File: test_raw_random.py From D-VAE with MIT License | 6 votes |
def test_broadcast_arguments(self):
    rng_R = random_state_type()
    low = tensor.dvector()
    high = tensor.dcol()
    post_r, out = uniform(rng_R, low=low, high=high)
    assert out.ndim == 2
    f = compile.function([rng_R, low, high], [post_r, out],
                         accept_inplace=True)

    rng_state0 = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(utt.fetch_seed())
    post0, val0 = f(rng_state0, [-5, .5, 0, 1], [[1.]])
    post1, val1 = f(post0, [.9], [[1.], [1.1], [1.5]])
    post2, val2 = f(post1, [-5, .5, 0, 1], [[1.], [1.1], [1.5]])

    numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
    numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
    numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],
                                   high=[[1.], [1.1], [1.5]])

    assert numpy.all(val0 == numpy_val0), (val0, numpy_val0)
    assert numpy.all(val1 == numpy_val1)
    assert numpy.all(val2 == numpy_val2)
Example #7
Source File: test_ops.py From attention-lvcsr with MIT License | 6 votes |
def test_infer_shape(self):
    x = dmatrix('x')
    x.tag.test_value = np.zeros((2, 2))
    y = dvector('y')
    y.tag.test_value = [0, 0]

    def infer_shape(node, shapes):
        x, y = shapes
        return [y]

    @as_op([dmatrix, dvector], dvector, infer_shape)
    def cumprod_plus(x, y):
        return np.cumprod(x) + y

    self._compile_and_check([x, y],
                            [cumprod_plus(x, y)],
                            [[[1.5, 5], [2, 2]], [1, 100, 2, 200]],
                            cumprod_plus.__class__, warn=False)
Example #8
Source File: test_ops.py From D-VAE with MIT License | 6 votes |
def test_infer_shape(self):
    x = dmatrix('x')
    x.tag.test_value = np.zeros((2, 2))
    y = dvector('y')
    y.tag.test_value = [0, 0]

    def infer_shape(node, shapes):
        x, y = shapes
        return [y]

    @as_op([dmatrix, dvector], dvector, infer_shape)
    def cumprod_plus(x, y):
        return np.cumprod(x) + y

    self._compile_and_check([x, y],
                            [cumprod_plus(x, y)],
                            [[[1.5, 5], [2, 2]], [1, 100, 2, 200]],
                            cumprod_plus.__class__, warn=False)
Example #9
Source File: test_raw_random.py From D-VAE with MIT License | 6 votes |
def test_basic_usage(self):
    rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
    assert not rf.inplace
    assert getattr(rf, 'destroy_map', {}) == {}

    rng_R = random_state_type()

    # If calling RandomFunction directly, all args have to be specified,
    # because shape will have to be moved to the end
    post_r, out = rf(rng_R, (4,), 0., 1.)

    assert out.type == tensor.dvector

    f = compile.function([rng_R], out)

    rng_state0 = numpy.random.RandomState(utt.fetch_seed())

    f_0 = f(rng_state0)
    f_1 = f(rng_state0)

    assert numpy.all(f_0 == f_1)
Example #10
Source File: test_printing.py From D-VAE with MIT License | 6 votes |
def test_pydotprint_long_name():
    """This is a REALLY PARTIAL TEST.

    It prints a graph where there are variable and apply nodes whose long
    names are different, but not the shortened names.
    We should not merge those nodes in the dot graph.
    """
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    mode = theano.compile.mode.get_default_mode().excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    theano.printing.pydotprint(f, max_label_size=5,
                               print_output_file=False)
    theano.printing.pydotprint([x * 2, x + x],
                               max_label_size=5,
                               print_output_file=False)
Example #11
Source File: test_raw_random.py From attention-lvcsr with MIT License | 6 votes |
def test_basic_usage(self):
    rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
    assert not rf.inplace
    assert getattr(rf, 'destroy_map', {}) == {}

    rng_R = random_state_type()

    # If calling RandomFunction directly, all args have to be specified,
    # because shape will have to be moved to the end
    post_r, out = rf(rng_R, (4,), 0., 1.)

    assert out.type == tensor.dvector

    f = compile.function([rng_R], out)

    rng_state0 = numpy.random.RandomState(utt.fetch_seed())

    f_0 = f(rng_state0)
    f_1 = f(rng_state0)

    assert numpy.all(f_0 == f_1)
Example #12
Source File: test_printing.py From attention-lvcsr with MIT License | 6 votes |
def test_pydotprint_long_name():
    """This is a REALLY PARTIAL TEST.

    It prints a graph where there are variable and apply nodes whose long
    names are different, but not the shortened names.
    We should not merge those nodes in the dot graph.
    """
    # Skip test if pydot is not available.
    if not theano.printing.pydot_imported:
        raise SkipTest('pydot not available')

    x = tensor.dvector()
    mode = theano.compile.mode.get_default_mode().excluding("fusion")
    f = theano.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    theano.printing.pydotprint(f, max_label_size=5,
                               print_output_file=False)
    theano.printing.pydotprint([x * 2, x + x],
                               max_label_size=5,
                               print_output_file=False)
Example #13
Source File: test_gc.py From D-VAE with MIT License | 6 votes |
def test_merge_opt_runtime():
    """In the original merge optimization, the following graph
    took like caused the MERGE optimizer to exhibit really bad
    performance (quadratic? exponential?)

    Ironically, there is actually no merging to do in this graph.
    """
    x = T.dvector()
    for i in xrange(50):
        if i:
            r = r + r / 10
        else:
            r = x
    t = time.time()
    f = theano.function([x], r, mode='FAST_COMPILE')
    # FAST_RUN does in-place optimizer which requires a lot of
    # toposorting, which is actually pretty slow at the moment.  This
    # test was designed to test MergeOptimizer... so I'm leaving
    # toposort optimizations for a later date.
    dt = time.time() - t

    # it should never take longer than 5 seconds to compile this graph
    assert dt < 5.0, dt
Example #14
Source File: test_fourier.py From D-VAE with MIT License | 6 votes |
def test_infer_shape(self):
    a = tensor.dvector()
    self._compile_and_check([a], [self.op(a, 16, 0)],
                            [numpy.random.rand(12)],
                            self.op_class)
    a = tensor.dmatrix()
    for var in [self.op(a, 16, 1), self.op(a, None, 1),
                self.op(a, 16, None), self.op(a, None, None)]:
        self._compile_and_check([a], [var],
                                [numpy.random.rand(12, 4)],
                                self.op_class)
    b = tensor.iscalar()
    for var in [self.op(a, 16, b), self.op(a, None, b)]:
        self._compile_and_check([a, b], [var],
                                [numpy.random.rand(12, 4), 0],
                                self.op_class)
Example #15
Source File: gen_graph.py From attention-lvcsr with MIT License | 6 votes |
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2),
                          expr="a**2 + b**2 + 2*a*b"):
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av=a
bv=b
a=T.dvector()
b=T.dvector()
tf= theano.function([a,b],%(expr)s)
""" % locals()
                      )
    ret = t3.repeat(nb_repeat, nb_call)
    return np.asarray(ret)
Example #16
Source File: test_shared_randomstreams.py From attention-lvcsr with MIT License | 6 votes |
def test_broadcast_arguments(self):
    random = RandomStreams(utt.fetch_seed())
    low = tensor.dvector()
    high = tensor.dcol()
    out = random.uniform(low=low, high=high)
    assert out.ndim == 2
    f = function([low, high], out)

    rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
    numpy_rng = numpy.random.RandomState(int(rng_seed))
    val0 = f([-5, .5, 0, 1], [[1.]])
    val1 = f([.9], [[1.], [1.1], [1.5]])
    val2 = f([-5, .5, 0, 1], [[1.], [1.1], [1.5]])

    numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
    numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
    numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],
                                   high=[[1.], [1.1], [1.5]])

    assert numpy.all(val0 == numpy_val0)
    assert numpy.all(val1 == numpy_val1)
    assert numpy.all(val2 == numpy_val2)
Example #17
Source File: test_fourier.py From attention-lvcsr with MIT License | 6 votes |
def test_infer_shape(self):
    a = tensor.dvector()
    self._compile_and_check([a], [self.op(a, 16, 0)],
                            [numpy.random.rand(12)],
                            self.op_class)
    a = tensor.dmatrix()
    for var in [self.op(a, 16, 1), self.op(a, None, 1),
                self.op(a, 16, None), self.op(a, None, None)]:
        self._compile_and_check([a], [var],
                                [numpy.random.rand(12, 4)],
                                self.op_class)
    b = tensor.iscalar()
    for var in [self.op(a, 16, b), self.op(a, None, b)]:
        self._compile_and_check([a, b], [var],
                                [numpy.random.rand(12, 4), 0],
                                self.op_class)
Example #18
Source File: gen_graph.py From D-VAE with MIT License | 6 votes |
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2),
                          expr="a**2 + b**2 + 2*a*b"):
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av=a
bv=b
a=T.dvector()
b=T.dvector()
tf= theano.function([a,b],%(expr)s)
""" % locals()
                      )
    ret = t3.repeat(nb_repeat, nb_call)
    return np.asarray(ret)
Example #19
Source File: test_raw_random.py From attention-lvcsr with MIT License | 5 votes |
def test_inplace_norun(self):
    rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,
                        inplace=True)
    assert rf.inplace
    assert getattr(rf, 'destroy_map', {}) != {}
Example #20
Source File: utils_.py From kusanagi with MIT License | 5 votes |
def get_compiled_gTrig(angi, D, derivs=True):
    m = tt.dvector('x')      # n_samples x idims
    v = tt.dmatrix('x_cov')  # n_samples x idims x idims

    gt = gTrig2(m, v, angi, D, derivs=derivs)
    return theano.function([m, v], gt)
Example #21
Source File: test_theano_utils.py From pysaliency with MIT License | 5 votes |
def setUp(self):
    self.x = T.dvector('x')
    self.x.tag.test_value = np.linspace(0, 1, 20)
    self.y = T.dvector('y')
    self.y.tag.test_value = np.linspace(0, 1, 20)
    self.input = T.dvector('input')
    self.input.tag.test_value = np.linspace(0, 1, 20)
    self.length = 20
    self.nonlin = nonlinearity(self.input, self.x, self.y, self.length)
    self.f = theano.function([self.input, self.x, self.y], self.nonlin)
Example #22
Source File: theano_test.py From odl with Mozilla Public License 2.0 | 5 votes |
def test_theano_operator():
    """Test the ODL->Theano operator wrapper."""
    # Define ODL operator
    matrix = np.random.rand(3, 2)
    odl_op = odl.MatrixOperator(matrix)

    # Define evaluation points
    x = [1., 2.]
    dy = [1., 2., 3.]

    # Create Theano placeholders
    x_theano = T.dvector()
    dy_theano = T.dvector()

    # Create Theano layer from odl operator
    odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op)

    # Build computation graphs
    y_theano = odl_op_layer(x_theano)
    y_theano_func = theano.function([x_theano], y_theano)
    dy_theano_func = theano.function([x_theano, dy_theano],
                                     T.Rop(y_theano, x_theano, dy_theano))

    # Evaluate using Theano
    result = y_theano_func(x)
    expected = odl_op(x)
    assert all_almost_equal(result, expected)

    # Evaluate the adjoint of the derivative, called gradient in Theano
    result = dy_theano_func(x, dy)
    expected = odl_op.derivative(x).adjoint(dy)
    assert all_almost_equal(result, expected)
Example #23
Source File: test_fastsweep.py From beat with GNU General Public License v3.0 | 5 votes |
def _theano_c_wrapper(self):
    Slownesses = self.get_slownesses()

    slownesses = tt.dvector('slownesses')
    slownesses.tag.test_value = Slownesses.flatten()

    nuc_x = tt.lscalar('nuc_x')
    nuc_x.tag.test_value = self.nuc_x

    nuc_y = tt.lscalar('nuc_y')
    nuc_y.tag.test_value = self.nuc_y

    cleanup = theanof.Sweeper(
        self.patch_size / km,
        self.n_patch_dip,
        self.n_patch_strike,
        'c')

    start_times = cleanup(slownesses, nuc_y, nuc_x)

    t0 = time()
    f = function([slownesses, nuc_y, nuc_x], start_times)
    t1 = time()
    theano_c_wrap_start_times = f(
        Slownesses.flatten(), self.nuc_y, self.nuc_x)
    print('tc', theano_c_wrap_start_times)
    t2 = time()

    logger.info('Theano C wrapper compile time %f' % (t1 - t0))
    logger.info('done theano C wrapper fast_sweeping in %f' % (t2 - t1))
    print('Theano C wrapper compile time %f' % (t1 - t0))
    return theano_c_wrap_start_times
Example #24
Source File: test_misc.py From attention-lvcsr with MIT License | 5 votes |
def __init__(self, input=tensor.dvector('input'),
             target=tensor.dvector('target'),
             n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
    super(NNet, self).__init__(**kw)

    self.input = input
    self.target = target
    self.lr = shared(lr, 'learning_rate')
    self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
    self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
    # print self.lr.type

    self.hidden = sigmoid(tensor.dot(self.w1, self.input))
    self.output = tensor.dot(self.w2, self.hidden)
    self.cost = tensor.sum((self.output - self.target)**2)

    self.sgd_updates = {
        self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
        self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

    self.sgd_step = pfunc(
        params=[self.input, self.target],
        outputs=[self.output, self.cost],
        updates=self.sgd_updates)

    self.compute_output = pfunc([self.input], self.output)

    self.output_from_hidden = pfunc([self.hidden], self.output)
Example #25
Source File: test_raw_random.py From attention-lvcsr with MIT License | 5 votes |
def test_no_inplace(self):
    """Test that when not running inplace, the RandomState is not updated"""
    rf = RandomFunction('uniform', tensor.dvector)
    rng_R = random_state_type()

    post_r, out = rf(rng_R, (3,), 0., 1.)
    f = compile.function([rng_R], [post_r, out])
    rng = numpy.random.RandomState(utt.fetch_seed())

    rng0, val0 = f(rng)
    rng_ = numpy.random.RandomState(utt.fetch_seed())
    # rng should still be in a fresh state
    self.assertTrue(rng_R.type.values_eq(rng, rng_))
    # rng0 should be in an updated state
    self.assertFalse(rng_R.type.values_eq(rng, rng0))

    f2 = compile.function(
        [compile.In(rng_R,
                    value=rng,
                    update=post_r,
                    mutable=False)],
        [post_r, out])
    rng2, val2 = f2()
    # rng should be in a fresh state
    self.assertTrue(rng_R.type.values_eq(rng, rng_))
    # rng2 should be in an updated state
    self.assertFalse(rng_R.type.values_eq(rng, rng2))
    # The updated state should be the same for both functions
    self.assertTrue(rng_R.type.values_eq(rng2, rng0))

    rng3, val3 = f2()
    # rng2 should not have changed
    self.assertTrue(rng_R.type.values_eq(rng2, rng0))
    # rng3 should be an updated again version of rng2
    self.assertFalse(rng_R.type.values_eq(rng3, rng2))
    self.assertFalse(rng_R.type.values_eq(rng3, rng))
Example #26
Source File: test_raw_random.py From attention-lvcsr with MIT License | 5 votes |
def test_inplace_optimization(self):
    """Test that FAST_RUN includes the random_make_inplace optimization"""
    # inplace = False
    rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
    rng_R = random_state_type()

    # If calling RandomFunction directly, all args have to be specified,
    # because shape will have to be moved to the end
    post_r2, out2 = rf2(rng_R, (4,), 0., 1.)

    f = compile.function(
        [compile.In(rng_R,
                    value=numpy.random.RandomState(utt.fetch_seed()),
                    update=post_r2,
                    mutable=True)],
        out2,
        mode='FAST_RUN')  # DEBUG_MODE can't pass the id-based test below

    # test that the RandomState object stays the same from function call to
    # function call, but that the values returned change from call to call.

    id0 = id(f[rng_R])
    val0 = f()
    assert id0 == id(f[rng_R])
    val1 = f()
    assert id0 == id(f[rng_R])

    assert not numpy.allclose(val0, val1)
Example #27
Source File: test_sort.py From attention-lvcsr with MIT License | 5 votes |
def test3(self):
    a = tensor.dvector()
    w2 = sort(a)
    f = theano.function([a], w2)
    gv = f(self.v_val)
    gt = np.sort(self.v_val)
    assert np.allclose(gv, gt)
Example #28
Source File: test_shared_randomstreams.py From attention-lvcsr with MIT License | 5 votes |
def test_uniform_vector(self):
    random = RandomStreams(utt.fetch_seed())
    low = tensor.dvector()
    high = tensor.dvector()
    out = random.uniform(low=low, high=high)
    assert out.ndim == 1
    f = function([low, high], out)

    low_val = [.1, .2, .3]
    high_val = [1.1, 2.2, 3.3]
    seed_gen = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

    # Arguments of size (3,)
    val0 = f(low_val, high_val)
    numpy_val0 = numpy_rng.uniform(low=low_val, high=high_val)
    print('THEANO', val0)
    print('NUMPY', numpy_val0)
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,)
    val1 = f(low_val[:-1], high_val[:-1])
    numpy_val1 = numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1])
    print('THEANO', val1)
    print('NUMPY', numpy_val1)
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = function([low, high],
                 random.uniform(low=low, high=high, size=(3,)))
    val2 = g(low_val, high_val)
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
    numpy_val2 = numpy_rng.uniform(low=low_val, high=high_val, size=(3,))
    assert numpy.all(val2 == numpy_val2)
    self.assertRaises(ValueError, g, low_val[:-1], high_val[:-1])
Example #29
Source File: test_blas_c.py From attention-lvcsr with MIT License | 5 votes |
def test_multiple_inplace(self):
    x = tensor.dmatrix('x')
    y = tensor.dvector('y')
    z = tensor.dvector('z')
    f = theano.function([x, y, z],
                        [tensor.dot(y, x), tensor.dot(z, x)],
                        mode=mode_blas_opt)
    vx = numpy.random.rand(3, 3)
    vy = numpy.random.rand(3)
    vz = numpy.random.rand(3)
    out = f(vx, vy, vz)
    assert numpy.allclose(out[0], numpy.dot(vy, vx))
    assert numpy.allclose(out[1], numpy.dot(vz, vx))
    assert len([n for n in f.maker.fgraph.apply_nodes
                if isinstance(n.op, tensor.AllocEmpty)]) == 2
Example #30
Source File: test_corr.py From attention-lvcsr with MIT License | 5 votes |
def test_wrong_input(self):
    """
    Make sure errors are raised when image and kernel are not 4D tensors
    """
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', input=T.dmatrix())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', filters=T.dvector())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', input=T.dtensor3())