Python theano._asarray() Examples
The following are 30 code examples of theano._asarray().
You may also want to check out all available functions and classes of the theano module.
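theano._asarray() behaves like numpy.asarray() but guarantees that the returned array has exactly the requested dtype (plain numpy.asarray() can return a different precision for some inputs on some platforms). A minimal usage sketch, assuming only that theano is installed; the values are toy data:

    import numpy
    import theano

    # Convert a Python list to a numpy.ndarray with a guaranteed dtype.
    x = theano._asarray([1, 2, 3], dtype='float32')
    assert isinstance(x, numpy.ndarray)
    assert x.dtype == numpy.float32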
Example #1
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_assign_to_slice():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8, 9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # first get a slice of a
    _c = _a[:, :, 1]

    # set middle row through cube to 7, 8, 9
    # (this corresponds to the middle row of matrix _c)
    _c[:, 1] = _b
    a[:, :, 1][:, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _d = _a[1, :, :]
    _d[1, :] = b * 10
    a[1, :, :][1, :] = b * 10
    assert numpy.allclose(a, numpy.asarray(_a))
Example #2
Source File: test_pfunc.py From D-VAE with MIT License
def test_givens(self):
    x = shared(0)
    assign = pfunc([], x, givens={x: 3})
    assert assign() == 3
    assert x.get_value(borrow=True) == 0

    y = tensor.ivector()
    f = pfunc([y], (y * x), givens={x: 6})
    assert numpy.all(f([1, 1, 1]) == [6, 6, 6])
    assert x.get_value() == 0

    z = tensor.ivector()
    c = z * y
    f = pfunc([y], (c + 7),
              givens={z: theano._asarray([4, 4, 4], dtype='int32')})
    assert numpy.all(f([1, 1, 1]) == [11, 11, 11])
    assert x.get_value() == 0
Example #3
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse6():
    """Test when all inputs have two broadcastable dimensions at the
    beginning."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous"
Example #4
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse7(atol=1e-6):
    """Test when one input has one broadcastable dimension and the
    other is a scalar."""
    shape = (5, 4, 1)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)

    # let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    # print "Expected collapse to c contiguous"
Example #5
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def perform(self, node, inputs, output_storage):
    """
    Calculate ROC AUC score.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    try:
        roc_auc = roc_auc_score(y_true, y_score)
    except ValueError:
        roc_auc = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))
    # [0][0]
    output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
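For context, the actual metric above comes from sklearn's roc_auc_score. A minimal standalone sketch, using the toy labels and scores from the scikit-learn documentation:

    import numpy as np
    from sklearn.metrics import roc_auc_score

    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    print(roc_auc_score(y_true, y_score))  # 0.75

This scalar is what perform() above stores into output_storage[0][0], after theano._asarray() casts it to config.floatX.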
Example #6
Source File: sigm.py From D-VAE with MIT License
def c_code(self, node, name, inp, out, sub):
    x, = inp
    z, = out
    # These constants were obtained by looking at the output of
    # python commands like:
    #   for i in xrange(750):
    #       print i, repr(numpy.log1p(numpy.exp(theano._asarray([i, -i], dtype=dt))))
    # the boundary checks prevent us from generating inf
    # float16 limits: -17.0, 6.0
    # We use the float32 limits for float16 for now as the
    # computation will happen in float32 anyway.
    if (node.inputs[0].type == scalar.float32 or
            node.inputs[0].type == scalar.float16):
        return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
    elif node.inputs[0].type == scalar.float64:
        return """%(z)s = %(x)s < -745.0 ? 0.0 : %(x)s > 16.0 ? %(x)s : log1p(exp(%(x)s));""" % locals()
    else:
        raise NotImplementedError('only floating point is implemented')
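To check the float32 branch without compiling anything, here is a small NumPy sketch of the same piecewise-stable softplus; the thresholds mirror the C code above, while the helper name stable_softplus_f32 is hypothetical:

    import numpy as np

    def stable_softplus_f32(x):
        # below -103 the float32 result underflows to 0; above 14 we
        # return x directly, matching the C code's cutoff and avoiding
        # overflow in exp()
        x = np.asarray(x, dtype=np.float32)
        out = np.empty_like(x)
        small = x < -103.0
        large = x > 14.0
        mid = ~(small | large)
        out[small] = 0.0
        out[large] = x[large]
        out[mid] = np.log1p(np.exp(x[mid]))
        return out

    print(stable_softplus_f32([-200.0, 0.0, 20.0]))  # [0. 0.6931472 20.]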
Example #7
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse4():
    """Test when only one input has two broadcastable dimensions, one
    at each end, and we add a scalar."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions"
Example #8
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse3():
    """Test when only one input has two broadcastable dimensions, one
    at each end."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions"
Example #9
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse2():
    """Test when only one input has one broadcastable dimension."""
    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions"
Example #10
Source File: test_basic_ops.py From D-VAE with MIT License
def speed_elemwise_collapse2():
    """Used to test the speedup from the generalised collapse of
    c-contiguous dims."""
    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, :, :, ::2]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, :, :, ::2]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time()
Example #11
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_comparaison_cast():
    """Test whether an elemwise comparison followed by a cast to
    float32 is pushed to the gpu."""
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'),
                  mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Example #12
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise4():
    """Test that two vectors can be broadcast to form an outer product
    (by performing a rank-1 matrix update)."""
    shape = (3, 4)
    a = tcn.shared_constructor(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)

    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise

    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32'))
Example #13
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise3():
    """Several kinds of elemwise expressions with dimension
    permutations and broadcasting."""
    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)

    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise

    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32'))
Example #14
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_exp():
    # print >>sys.stdout, 'starting test_exp'
    for shape in ((), (3,), (2, 3),
                  (1, 10000000), (10, 1000000), (100, 100000),
                  (1000, 10000), (10000, 1000)):
        a0 = theano._asarray(numpy.random.rand(*shape), dtype='float32')
        a1 = a0.copy()
        b0 = cuda_ndarray.CudaNdarray(a0)
        b1 = cuda_ndarray.CudaNdarray(a1)
        t0 = time.time()
        bsum = b0.exp()
        t1 = time.time()
        gpu_dt = t1 - t0
        t0 = time.time()
        asum = numpy.exp(a1)
        t1 = time.time()
        cpu_dt = t1 - t0
        # print shape, 'adding ', a0.size, 'cpu', cpu_dt, 'advantage', advantage(cpu_dt, gpu_dt)
        # c = numpy.asarray(b0 + b1)
        if asum.shape:
            assert numpy.allclose(asum, numpy.asarray(bsum))
Example #15
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_collapse():
    """Test when all inputs have one (and the same) broadcastable
    dimension."""
    shape = (4, 5, 60)
    a = cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse of all dimensions"
Example #16
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_matrix_bad_type():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float64')

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, :, :] = b
        assert False
    except TypeError as e:
        # print e
        assert True
Example #17
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_matrix_bad_ndim():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    try:
        # attempt to assign the ndarray b with setitem
        _a[:, :, 1] = _b
        assert False
    except ValueError as e:
        # print e
        assert True

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, :, :] = b
        assert False
    except ValueError as e:
        # print e
        assert True
Example #18
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_matrix_bad_shape():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    try:
        # attempt to assign the ndarray b with setitem
        _a[:, 1, 1] = _b
        assert False
    except ValueError as e:
        # print e
        assert True

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, 1, :] = b
        assert False
    except ValueError as e:
        # print e
        assert True
Example #19
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_matrixvector1():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([8, 9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set second column to 8, 9
    _a[:, 1] = _b
    a[:, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _a[:, 1] = b * 100
    a[:, 1] = b * 100
    assert numpy.allclose(a, numpy.asarray(_a))

    row = theano._asarray([777, 888, 999], dtype='float32')
    _a[1, :] = row
    a[1, :] = row
    assert numpy.allclose(a, numpy.asarray(_a))
Example #20
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_setitem_matrixscalar0():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray(8, dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set an element to 8
    _a[1, 1] = _b
    a[1, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _a[1, 1] = theano._asarray(888, dtype='float32')
    a[1, 1] = theano._asarray(888, dtype='float32')
    assert numpy.allclose(a, numpy.asarray(_a))

    # broadcast a 0
    _a[1, 1] = 0
    _a[0:2] = 0
    _a[1:] = 0
Example #21
Source File: test_opt.py From D-VAE with MIT License
def test_elemwise_fusion():
    """Test that GpuElemwise fusion works correctly."""
    shape = (3, 4)
    a = cuda.shared_constructor(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    for i, node in enumerate(topo):
        print(i, node, file=sys.stdout)
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)

    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
Example #22
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_stride_manipulation():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    b = cuda_ndarray.CudaNdarray(a)
    v = b.view()
    v._dev_data += 0
    c = numpy.asarray(v)
    assert numpy.all(a == c)

    sizeof_float = 4
    offset = 0
    b_strides = b._strides
    for i in xrange(len(b.shape)):
        offset += (b.shape[i] - 1) * b_strides[i]
        v._set_stride(i, -b_strides[i])
    v._dev_data += offset * sizeof_float
    c = numpy.asarray(v)
    assert numpy.all(c == [[5, 4, 3], [2, 1, 0]])
Example #23
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_getshape():
    shapelist = [
        ((1, 2, 3), (1, 2, 3)),
        ((1,), (1,)),
        ((1, 2, 3), (3, 2, 1)),
        ((1, 2, 3), (6,)),
        ((1, 2, 3, 2), (6, 2)),
        ((2, 3, 2), (6, 2))
    ]

    def subtest(shape):
        # use the argument, not the enclosing loop variable shape_1,
        # so that both shapes of each pair are actually tested
        a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert b.shape == a.shape

    for shape_1, shape_2 in shapelist:
        subtest(shape_1)
        subtest(shape_2)
Example #24
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise0():
    a = tcn.shared_constructor(
        theano._asarray(numpy.random.rand(4, 4), dtype='float32'), 'a')
    b = tensor.fmatrix()
    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)

    # check that we work inplace.
    assert (list(f.maker.fgraph.toposort()[1].op.destroy_map.items())
            == [(0, [0])])

    a0 = a.get_value() * 1.0
    f(numpy.ones((4, 4), dtype='float32'))
    assert numpy.all(a0 + 1.0 == a.get_value())
Example #25
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_nvcc_bug():
    """
    The function k_elemwise_unary_rowmajor_copy (used by cuda.copy())
    in cuda_ndarray.cu is miscompiled by nvcc 3.0 and 3.1 beta. We
    found a workaround, so it should work correctly. Without the
    workaround, this test fails.
    """
    shape = (5, 4)
    aa = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a = aa[::, ::-1]

    b = cuda_ndarray.CudaNdarray(aa)[::, ::-1]
    c = copy.copy(b)
    d = copy.deepcopy(b)

    assert numpy.allclose(a, numpy.asarray(b))
    assert numpy.allclose(a, numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
    b += b
    assert numpy.allclose(a + a, numpy.asarray(b))
    assert numpy.allclose(a + a, numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
Example #26
Source File: test_cuda_ndarray.py From D-VAE with MIT License
def test_host_to_device():
    # print >>sys.stdout, 'starting test_host_to_dev'
    for shape in ((), (3,), (2, 3), (3, 4, 5, 6)):
        a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        c = numpy.asarray(b)
        assert numpy.all(a == c)

        # test with float32 dtype
        d = numpy.asarray(b, dtype='float32')
        assert numpy.all(a == d)

        # test with a non-float32 dtype
        try:
            numpy.asarray(b, dtype='int8')
            assert False
        except TypeError:
            pass
Example #27
Source File: test_basic_ops.py From D-VAE with MIT License
def rand_cuda_ndarray(shape):
    return cuda_ndarray.CudaNdarray(
        theano._asarray(numpy.random.rand(*shape), dtype='float32'))

# intentionally disabled
Example #28
Source File: opt.py From D-VAE with MIT License
def perform(self, node, inputs, outputs):
    (a_val, a_ind, a_ptr, a_nrows, b) = inputs
    (out,) = outputs
    a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
                                (a_nrows, b.shape[0]),
                                copy=False)
    # out[0] = a.dot(b)
    out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
    assert _is_dense(out[0])  # scipy 0.7 automatically converts to dense
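To see what that csc_matrix construction does, here is a standalone sketch with hypothetical toy values for the (data, indices, indptr) triplet:

    import numpy as np
    import scipy.sparse

    a_val = np.array([1.0, 2.0, 3.0])   # nonzero values
    a_ind = np.array([0, 2, 1])         # row index of each value
    a_ptr = np.array([0, 1, 3])         # column k owns a_val[a_ptr[k]:a_ptr[k + 1]]
    a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr), shape=(3, 2))
    b = np.ones((2, 4))
    out = np.asarray(a * b)             # sparse * dense matrix product, densified
    assert out.shape == (3, 4)

In perform() above, the row count arrives as a_nrows and the column count is inferred from b.shape[0], which matches the shape tuple used here.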
Example #29
Source File: test_basic.py From attention-lvcsr with MIT License
def test_may_share_memory():
    a = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
    b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3))
    as_ar = lambda a: theano._asarray(a, dtype='int32')
    for a_, b_, rep in [(a, a, True),
                        (b, b, True),
                        (a, b, False),
                        (a, a.data, True),
                        (a, a.indptr, True),
                        (a, a.indices, True),
                        (a, as_ar(a.shape), False),
                        (a.data, a, True),
                        (a.indptr, a, True),
                        (a.indices, a, True),
                        (as_ar(a.shape), a, False),
                        (b, b.data, True),
                        (b, b.indptr, True),
                        (b, b.indices, True),
                        (b, as_ar(b.shape), False),
                        (b.data, b, True),
                        (b.indptr, b, True),
                        (b.indices, b, True),
                        (as_ar(b.shape), b, False),
                        (b.data, a, False),
                        (b.indptr, a, False),
                        (b.indices, a, False),
                        (as_ar(b.shape), a, False),
                        (a.transpose(), a, True),
                        (b.transpose(), b, True),
                        (a.transpose(), b, False),
                        (b.transpose(), a, False),
                        ]:
        assert SparseType.may_share_memory(a_, b_) == rep
Example #30
Source File: test_basic_ops.py From D-VAE with MIT License
def test_elemwise_empty():
    # test with 0 elements
    a = tcn.shared_constructor(
        theano._asarray(numpy.random.rand(0, 0), dtype='float32'), 'a')
    b = tensor.fmatrix()
    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)
    f2 = pfunc([b], [], updates=[(a, a + b)], mode=mode_without_gpu)

    a0 = a.get_value() * 1.0
    f(numpy.ones((0, 0), dtype='float32'))
    assert numpy.all(a0 + 1.0 == a.get_value())