Python theano.tensor.TensorType() Examples
The following are 30 code examples of theano.tensor.TensorType(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor, or try the search function.
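
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how TensorType is typically used: a type is built from a dtype plus a broadcastable pattern, calling the type produces a symbolic variable, and that variable can then be compiled into a function.

    import numpy
    import theano
    import theano.tensor as T

    # A TensorType is a dtype plus a broadcastable pattern; (False, False)
    # describes a matrix whose two dimensions are not broadcastable.
    dmatrix_type = T.TensorType('float64', (False, False))

    # Calling the type creates a symbolic variable of that type.
    x = dmatrix_type('x')

    # The variable can be used like any other tensor variable.
    f = theano.function([x], x * 2)
    print(f(numpy.ones((2, 3))))  # a 2x3 array of 2.0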

Example #1
Source File: opt.py From D-VAE with MIT License | 6 votes |
def local_gpu_extract_diagonal(node):
    """
    extract_diagonal(host_from_gpu()) -> host_from_gpu(extract_diagonal)
    gpu_from_host(extract_diagonal) -> extract_diagonal(gpu_from_host)
    """
    if (isinstance(node.op, nlinalg.ExtractDiag) and
            isinstance(node.inputs[0].type, theano.tensor.TensorType)):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, HostFromGpu):
            return [host_from_gpu(nlinalg.extract_diag(
                as_cuda_ndarray_variable(inp)))]
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if (host_input.owner and
                isinstance(host_input.owner.op, nlinalg.ExtractDiag) and
                isinstance(host_input.owner.inputs[0].type,
                           theano.tensor.TensorType)):
            diag_node = host_input.owner
            return [nlinalg.extract_diag(
                as_cuda_ndarray_variable(diag_node.inputs[0]))]
    return False
Example #2
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_sanity_check_slice(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicSlice = SliceType()()
    z = GetItem()(mySymbolicMatricesList, mySymbolicSlice)
    self.assertFalse(isinstance(z, T.TensorVariable))
    f = theano.function([mySymbolicMatricesList, mySymbolicSlice], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
Example #3
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_op_sd(self):
    for format in sparse.sparse_formats:
        for dtype in sparse.all_dtypes:
            variable, data = sparse_random_inputs(format,
                                                  shape=(10, 10),
                                                  out_dtype=dtype,
                                                  n=2,
                                                  p=0.1)
            variable[1] = tensor.TensorType(dtype=dtype,
                                            broadcastable=(False, False))()
            data[1] = data[1].toarray()
            f = theano.function(variable, self.op(*variable))
            tested = f(*data)
            expected = numpy.dot(data[0].toarray(), data[1])
            assert tested.format == format
            assert tested.dtype == expected.dtype
            tested = tested.toarray()
            utt.assert_allclose(tested, expected)
Example #4
Source File: test_opt.py From D-VAE with MIT License | 6 votes |
def test_insert_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicIndex = T.scalar(dtype='int64')
    mySymbolicMatrix = T.matrix()
    z = Insert()(mySymbolicMatricesList, mySymbolicIndex, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         mySymbolicIndex, mySymbolicMatrix],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y), [x, y]))
Example #5
Source File: basic.py From D-VAE with MIT License | 6 votes |
def _is_sparse_variable(x):
    """
    Returns
    -------
    boolean
        True iff x is a L{SparseVariable} (and not a L{tensor.TensorType},
        for instance).
    """
    if not isinstance(x, gof.Variable):
        raise NotImplementedError("this function should only be called on "
                                  "*variables* (of type sparse.SparseType "
                                  "or tensor.TensorType, for instance), not ",
                                  x)
    return isinstance(x.type, SparseType)
Example #6
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_sanity_check_single(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicScalar = T.scalar(dtype='int64')
    z = GetItem()(mySymbolicMatricesList, mySymbolicScalar)
    f = theano.function([mySymbolicMatricesList, mySymbolicScalar], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(0, dtype='int64')), x))
Example #7
Source File: opt.py From D-VAE with MIT License | 6 votes |
def make_node(self, x, y):
    x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)
    out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
    if self.inplace:
        assert out_dtype == y.dtype
    indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)
    # We either use CSC or CSR depending on the format of input
    assert self.format == x.type.format
    # The magic number two here arises because L{scipy.sparse}
    # objects must be matrices (have dimension 2)
    assert y.type.ndim == 2
    out = tensor.TensorType(dtype=out_dtype,
                            broadcastable=y.type.broadcastable)()
    return gof.Apply(self,
                     [data, indices, indptr, y],
                     [out])
Example #8
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_interface(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicScalar = T.scalar(dtype='int64')
    z = mySymbolicMatricesList[mySymbolicScalar]
    f = theano.function([mySymbolicMatricesList, mySymbolicScalar], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(0, dtype='int64')), x))
    z = mySymbolicMatricesList[0]
    f = theano.function([mySymbolicMatricesList], z)
    self.assertTrue(numpy.array_equal(f([x]), x))
Example #9
Source File: test_basic.py From D-VAE with MIT License | 6 votes |
def test_constant_input(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = GetItem()(mySymbolicMatricesList, 0)
    f = theano.function([mySymbolicMatricesList], z)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x]), x))

    z = GetItem()(mySymbolicMatricesList, slice(0, 1, 1))
    f = theano.function([mySymbolicMatricesList], z)
    self.assertTrue(numpy.array_equal(f([x]), [x]))
Example #10
Source File: test_shared.py From D-VAE with MIT License | 6 votes |
def test_ctors(self):
    if theano.configdefaults.python_int_bitwidth() == 32:
        assert shared(7).type == theano.tensor.iscalar, shared(7).type
    else:
        assert shared(7).type == theano.tensor.lscalar, shared(7).type
    assert shared(7.0).type == theano.tensor.dscalar
    assert shared(numpy.float32(7)).type == theano.tensor.fscalar

    # test tensor constructor
    b = shared(numpy.zeros((5, 5), dtype='int32'))
    assert b.type == TensorType('int32', broadcastable=[False, False])
    b = shared(numpy.random.rand(4, 5))
    assert b.type == TensorType('float64', broadcastable=[False, False])
    b = shared(numpy.random.rand(5, 1, 2))
    assert b.type == TensorType('float64',
                                broadcastable=[False, False, False])

    assert shared([]).type == generic

    def badfunc():
        shared(7, bad_kw=False)
    self.assertRaises(TypeError, badfunc)
Example #11
Source File: test_opt.py From D-VAE with MIT License | 6 votes |
def test_remove_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Remove()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True, mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
Example #12
Source File: test_opt.py From D-VAE with MIT License | 6 votes |
def test_append_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Append()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True, mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
Example #13
Source File: raw_random.py From D-VAE with MIT License | 6 votes |
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None,
            dtype=None):
    """
    Sample from a uniform distribution between low and high.

    If the size argument is ambiguous on the number of dimensions,
    ndim may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    If dtype is not specified, it will be inferred from the dtype of
    low and high, but will be at least as precise as floatX.
    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, low.dtype,
                                   high.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction('uniform',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
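
The raw_random.uniform above constructs a RandomFunction op whose output TensorType is inferred from the size/low/high arguments. In user code the same sampling is usually reached through the higher-level shared RandomStreams interface; a minimal sketch, independent of the source file above and assuming a standard Theano install:

    import theano
    from theano.tensor.shared_randomstreams import RandomStreams

    srng = RandomStreams(seed=234)
    # uniform() ultimately instantiates a random op whose TensorType output
    # (dtype and broadcastable pattern) is inferred from the size argument.
    u = srng.uniform(size=(2, 2), low=0.0, high=1.0)
    f = theano.function([], u)
    print(f())  # a 2x2 array of samples in [0, 1)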
Example #14
Source File: corr.py From D-VAE with MIT License | 6 votes |
def make_node(self, img, topgrad, shape=None):
    img = as_tensor_variable(img)
    topgrad = as_tensor_variable(topgrad)
    if img.type.ndim != 4:
        raise TypeError('img must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')

    if self.subsample != (1, 1) or self.border_mode == "half":
        if shape is None:
            raise ValueError('shape must be given if subsample != (1, 1)'
                             ' or border_mode == "half"')
        height_width = [as_tensor_variable(shape[0]).astype('int64'),
                        as_tensor_variable(shape[1]).astype('int64')]
    else:
        height_width = []

    broadcastable = [topgrad.type.broadcastable[1],
                     img.type.broadcastable[1],
                     False, False]
    dtype = img.type.dtype
    return Apply(self, [img, topgrad] + height_width,
                 [TensorType(dtype, broadcastable)()])
Example #15
Source File: corr.py From D-VAE with MIT License | 6 votes |
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    if kern.type.ndim != 4:
        raise TypeError('kern must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')

    if self.subsample != (1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1)')
    height_width = ([as_tensor_variable(shape[0]).astype('int64'),
                     as_tensor_variable(shape[1]).astype('int64')]
                    if self.subsample != (1, 1) else [])

    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1],
                     False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width,
                 [TensorType(dtype, broadcastable)()])
Example #16
Source File: opt.py From D-VAE with MIT License | 6 votes |
def local_abstractconv_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(kern.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1]
    rval = CorrMM(border_mode=node.op.border_mode,
                  subsample=node.op.subsample)(img, kern)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
Example #17
Source File: opt.py From D-VAE with MIT License | 6 votes |
def local_abstractconv_gradweight_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    rval = CorrMM_gradWeights(border_mode=node.op.border_mode,
                              subsample=node.op.subsample)(img, topgrad,
                                                           shape)
    copy_stack_trace(node.outputs[0], rval)

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1]
    rval = theano.tensor.patternbroadcast(rval,
                                          node.outputs[0].broadcastable)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
Example #18
Source File: opt.py From D-VAE with MIT License | 6 votes |
def local_conv2d_cpu(node):
    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval]
Example #19
Source File: test_subtensor.py From D-VAE with MIT License | 6 votes |
def __init__(self, name, shared=tensor._shared,
             sub=tensor.Subtensor,
             inc_sub=tensor.IncSubtensor,
             adv_sub1=tensor.AdvancedSubtensor1,
             adv_incsub1=tensor.AdvancedIncSubtensor1,
             mode=None,
             dtype=theano.config.floatX,
             type=tensor.TensorType,
             ignore_topo=DeepCopyOp):
    self.shared = shared
    self.sub = sub
    self.inc_sub = inc_sub
    self.adv_sub1 = adv_sub1
    self.adv_incsub1 = adv_incsub1
    if mode is None:
        mode = theano.compile.mode.get_default_mode()
    self.mode = mode
    self.dtype = dtype
    self.type = type
    self.ignore_topo = ignore_topo
    self.fast_compile = theano.config.mode == 'FAST_COMPILE'
    self.ops = (sub, inc_sub, adv_sub1, adv_incsub1)
    return super(T_subtensor, self).__init__(name)
Example #20
Source File: test_blas.py From D-VAE with MIT License | 6 votes |
def test_maxpool():
    """TODO: test the gpu version!!! """
    for d0, d1, r_true, r_false in [(4, 4,
                                     [[[[5, 7], [13, 15]]]],
                                     [[[[5, 7], [13, 15]]]]),
                                    (5, 5,
                                     [[[[6, 8], [16, 18], [21, 23]]]],
                                     [[[[6, 8, 9], [16, 18, 19],
                                        [21, 23, 24]]]])]:
        for border, ret in [(True, r_true), (False, r_false)]:
            ret = numpy.array(ret)
            a = tcn.blas.Pool((2, 2), border)
            dmatrix4 = tensor.TensorType("float32",
                                         (False, False, False, False))
            b = dmatrix4()
            f = pfunc([b], [a(b)], mode=mode_with_gpu)

            bval = numpy.arange(0, d0 * d1).reshape(1, 1, d0, d1)
            r = f(bval)[0]
            # print bval, bval.shape, border
            # print r, r.shape
            assert (ret == r).all()
Example #21
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
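
A short usage sketch of the placeholder helper defined above (the shape and name are hypothetical, and it assumes the surrounding backend module with floatx and T is importable): a None entry in shape is only recorded in _keras_shape, while the underlying TensorType stores just the dtype and a broadcastable pattern of the same length.

    # Hypothetical usage of the placeholder() defined above.
    x = placeholder(shape=(None, 64), dtype='float32', name='input')
    print(x.type)          # TensorType(float32, matrix)
    print(x._keras_shape)  # (None, 64)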
Example #22
Source File: test_elemwise.py From D-VAE with MIT License | 6 votes |
def test_infer_shape(self):
    for s_left, s_right in [((5, 6), (5, 6)),
                            ((5, 6), (5, 1)),
                            ((5, 6), (1, 6)),
                            ((5, 1), (5, 6)),
                            ((1, 6), (5, 6)),
                            ((2, 3, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 5), (2, 3, 1, 5)),
                            ((2, 3, 4, 5), (1, 3, 4, 5)),
                            ((2, 1, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 1), (2, 3, 4, 5))]:
        dtype = theano.config.floatX
        t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
        t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
        t_left_val = numpy.zeros(s_left, dtype=dtype)
        t_right_val = numpy.zeros(s_right, dtype=dtype)
        self._compile_and_check([t_left, t_right],
                                [Elemwise(scalar.add)(t_left, t_right)],
                                [t_left_val, t_right_val], Elemwise)
Example #23
Source File: raw_random.py From D-VAE with MIT License | 6 votes |
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):
    """
    Sample from a normal distribution centered on avg with
    the specified standard deviation (std).

    If the size argument is ambiguous on the number of dimensions,
    ndim may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of avg and std.

    If dtype is not specified, it will be inferred from the dtype of
    avg and std, but will be at least as precise as floatX.
    """
    avg = tensor.as_tensor_variable(avg)
    std = tensor.as_tensor_variable(std)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype,
                                   std.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
    op = RandomFunction('normal',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, avg, std)
Example #24
Source File: opt.py From D-VAE with MIT License | 5 votes |
def safe_to_gpu(x):
    if (isinstance(x.type, tensor.TensorType) and
            x.type.dtype == 'float32'):
        return as_cuda_ndarray_variable(x)
    else:
        return x
Example #25
Source File: type.py From D-VAE with MIT License | 5 votes |
def values_eq(a, b):
    # TODO: make the comparison without a transfer.
    return tensor.TensorType.values_eq(numpy.asarray(a),
                                       numpy.asarray(b))
Example #26
Source File: type.py From D-VAE with MIT License | 5 votes |
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False,
                     rtol=None, atol=None):
    # TODO: make the comparison without a transfer.
    return tensor.TensorType.values_eq_approx(
        numpy.asarray(a),
        numpy.asarray(b),
        allow_remove_inf=allow_remove_inf,
        allow_remove_nan=allow_remove_nan,
        rtol=rtol, atol=atol)
Example #27
Source File: opt.py From D-VAE with MIT License | 5 votes |
def tensor_to_cuda(x):
    if (isinstance(x.type, tensor.TensorType) and
            x.type.dtype == 'float32'):
        y = CudaNdarrayType(broadcastable=x.type.broadcastable)()
        if x.name:
            y.name = x.name + '[cuda]'
        return y
    else:
        return x
Example #28
Source File: solve.py From D-VAE with MIT License | 5 votes |
def make_node(self, A, b):
    A_ = tensor.as_tensor_variable(A)
    b_ = tensor.as_tensor_variable(b)
    if A_.broadcastable != (False, False):
        raise TypeError("A must be a matrix", A_.type)
    if b_.broadcastable not in ((False,), (True, False), (False, False)):
        raise TypeError("b must be a matrix or vector", b_.type)

    odtype = scalar.upcast(A_.dtype, b_.dtype)
    otype = tensor.TensorType(broadcastable=b_.broadcastable, dtype=odtype)
    return gof.Apply(op=self, inputs=[A_, b_], outputs=[otype()])
Example #29
Source File: test_types.py From D-VAE with MIT License | 5 votes |
def make_node(self, c):
    return Apply(self, [c], [TensorType('float32', (False,))()])
Example #30
Source File: basic_ops.py From D-VAE with MIT License | 5 votes |
def make_node(self, x):
    if not isinstance(x.type, tensor.TensorType):
        raise TypeError(x)

    return Apply(self, [x], [GpuArrayType(broadcastable=x.broadcastable,
                                          context_name=self.context_name,
                                          dtype=x.dtype)()])