Python theano.tensor.tensor4() Examples

The following are 30 code examples of theano.tensor.tensor4(), drawn from open-source projects; each example notes its source file, project, and license. tensor4() returns a symbolic 4-dimensional tensor variable, most often used to represent a batch of images laid out as (batch, channels, height, width).
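As a quick orientation before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) showing the typical lifecycle of a tensor4 variable: declare the symbolic input, build an expression on it, and compile the graph into a callable.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic 4D tensor; the name 'X' is only a label for debugging.
X = T.tensor4('X', dtype=theano.config.floatX)

# Build an expression on it and compile the graph into a Python callable.
y = (X ** 2).sum(axis=(2, 3))  # per-sample, per-channel sum of squares
f = theano.function([X], y)

x_val = np.ones((2, 3, 4, 4), dtype=theano.config.floatX)
print(f(x_val).shape)  # (2, 3)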
Example #1
Source File: net_theano.py    From visual_dynamics with MIT License
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    x_shape, u_shape = input_shapes
    X_var = T.tensor4('X') if X_var is None else X_var
    U_var = T.matrix('U') if U_var is None else U_var
    X_diff_var = T.tensor4('X_diff') if X_diff_var is None else X_diff_var
    X_next_var = X_var + X_diff_var

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)

    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Example #2
Source File: net.py    From 3D-R2N2 with MIT License
def __init__(self, random_seed=dt.datetime.now().microsecond, compute_grad=True):
        self.rng = np.random.RandomState(random_seed)

        self.batch_size = cfg.CONST.BATCH_SIZE
        self.img_w = cfg.CONST.IMG_W
        self.img_h = cfg.CONST.IMG_H
        self.n_vox = cfg.CONST.N_VOX
        self.compute_grad = compute_grad

        # (self.batch_size, 3, self.img_h, self.img_w),
        # override x and is_x_tensor4 when using multi-view network
        self.x = tensor.tensor4()
        self.is_x_tensor4 = True

        # (self.batch_size, self.n_vox, 2, self.n_vox, self.n_vox),
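        # Note: tensor5 is not part of theano.tensor; the project presumably
        # defines it as a custom 5D TensorType for the voxel occupancy grid.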
        self.y = tensor5()

        self.activations = []  # list of all intermediate activations
        self.loss = []  # final loss
        self.output = []  # final output
        self.error = []  # final output error
        self.params = []  # all learnable params
        self.grads = []  # will be filled out automatically
        self.setup() 
Example #3
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_max_pooling():
    x = tensor.tensor4('x')
    num_channels = 4
    batch_size = 5
    x_size = 17
    y_size = 13
    pool_size = 3
    pool = MaxPooling((pool_size, pool_size))
    y = pool.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, x_size, y_size),
                       dtype=theano.config.floatX)
    assert_allclose(func(x_val),
                    numpy.ones((batch_size, num_channels,
                                x_size // pool_size,
                                y_size // pool_size)))
    pool.input_dim = (x_size, y_size)
    assert pool.get_dim('output') == (num_channels, x_size // pool_size + 1,
                                      y_size // pool_size + 1) 
Example #4
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_tied_biases():
    x = tensor.tensor4('x')
    num_channels = 4
    num_filters = 3
    batch_size = 5
    filter_size = (3, 3)
    conv = Convolutional(filter_size, num_filters, num_channels,
                         weights_init=Constant(1.), biases_init=Constant(2.),
                         tied_biases=True)
    conv.initialize()
    y = conv.apply(x)
    func = function([x], y)

    # Tied biases allow passing images of different sizes
    x_val_1 = numpy.ones((batch_size, num_channels, 10,
                          12), dtype=theano.config.floatX)
    x_val_2 = numpy.ones((batch_size, num_channels, 23,
                          19), dtype=theano.config.floatX)

    assert_allclose(func(x_val_1),
                    numpy.prod(filter_size) * num_channels *
                    numpy.ones((batch_size, num_filters, 8, 10)) + 2)
    assert_allclose(func(x_val_2),
                    numpy.prod(filter_size) * num_channels *
                    numpy.ones((batch_size, num_filters, 21, 17)) + 2) 
Example #5
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_no_input_size():
    # suppose x is the output of some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 3)
    num_filters = 2
    num_channels = 5
    c = Convolutional(filter_size, num_filters, num_channels, tied_biases=True,
                      weights_init=Constant(1.), biases_init=Constant(1.))
    c.initialize()
    out = c.apply(x)
    assert c.get_dim('output') == (2, None, None)
    assert out.ndim == 4

    c = Convolutional(filter_size, num_filters, num_channels,
                      tied_biases=False, weights_init=Constant(1.),
                      biases_init=Constant(1.))
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         c.initialize) 
Example #6
Source File: pooling.py    From Depth-Map-Prediction with GNU General Public License v3.0
def test_cmrnorm():
    from theano.tests.unittest_tools import verify_grad

    xtest = np.random.rand(2,8,3,4)
    xtest = xtest.astype(theano.config.floatX)

    x = T.tensor4('x', dtype=theano.config.floatX)
    x.tag.test_value = xtest

    y = cmrnorm(x, input_shape=xtest.shape[1:])
    f = theano.function([x], y, mode='DEBUG_MODE')
    f(xtest)

    f = theano.function([x], gpu_from_host(T.grad(T.sum(y), wrt=x)),
                        mode='DEBUG_MODE')
    f(xtest)
    theano.printing.debugprint(f)

    T.verify_grad(lambda x: cmrnorm(x, input_shape=xtest.shape[1:]),
                  (xtest,),
                  rng=np.random.RandomState(0))

    print('cmrnorm passed') 
Example #7
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_convolutional_transpose():
    x = tensor.tensor4('x')
    num_channels = 4
    num_filters = 3
    image_size = (8, 6)
    original_image_size = (17, 13)
    batch_size = 5
    filter_size = (3, 3)
    step = (2, 2)
    conv = ConvolutionalTranspose(
        original_image_size, filter_size, num_filters, num_channels, step=step,
        image_size=image_size, weights_init=Constant(1.),
        biases_init=Constant(5.))
    conv.initialize()
    y = conv.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels) + image_size,
                       dtype=theano.config.floatX)
    expected_value = num_channels * numpy.ones(
        (batch_size, num_filters) + original_image_size)
    expected_value[:, :, 2:-2:2, :] += num_channels
    expected_value[:, :, :, 2:-2:2] += num_channels
    expected_value[:, :, 2:-2:2, 2:-2:2] += num_channels
    assert_allclose(func(x_val), expected_value + 5) 
Example #8
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_convolutional():
    x = tensor.tensor4('x')
    num_channels = 4
    num_filters = 3
    batch_size = 5
    filter_size = (3, 3)
    conv = Convolutional(filter_size, num_filters, num_channels,
                         image_size=(17, 13), weights_init=Constant(1.),
                         biases_init=Constant(5.))
    conv.initialize()
    y = conv.apply(x)
    func = function([x], y)

    x_val = numpy.ones((batch_size, num_channels, 17, 13),
                       dtype=theano.config.floatX)
    assert_allclose(func(x_val),
                    numpy.prod(filter_size) * num_channels *
                    numpy.ones((batch_size, num_filters, 15, 11)) + 5)
    conv.image_size = (17, 13)
    conv.batch_size = 2  # This should have no effect on get_dim
    assert conv.get_dim('output') == (num_filters, 15, 11) 
Example #9
Source File: test_pool.py    From attention-lvcsr with MIT License
def test_DownsampleFactorMaxGrad(self):
        im = theano.tensor.tensor4()
        maxout = theano.tensor.tensor4()
        grad = theano.tensor.tensor4()

        for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            f = theano.function([im, maxout, grad],
                                DownsampleFactorMaxGrad(ds=(3, 3),
                                                        ignore_border=False,
                                                        mode=mode)(im, maxout, grad),
                                on_unused_input='ignore')

            if mode == 'max':
                assert any(isinstance(n.op, MaxPoolGrad)
                           for n in f.maker.fgraph.toposort())
                assert not any(isinstance(n.op, AveragePoolGrad)
                               for n in f.maker.fgraph.toposort())
            else:
                assert not any(isinstance(n.op, MaxPoolGrad)
                               for n in f.maker.fgraph.toposort())
                assert any(isinstance(n.op, AveragePoolGrad)
                           for n in f.maker.fgraph.toposort()) 
Example #10
Source File: test_pool.py    From attention-lvcsr with MIT License
def test_max_pool_2d_2D_same_size(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        test_input_array = numpy.array([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.]
        ]]]).astype(theano.config.floatX)
        test_answer_array = numpy.array([[[
            [0., 0., 0., 0.],
            [0., 6., 0., 8.]
        ]]]).astype(theano.config.floatX)
        input = tensor.tensor4(name='input')
        patch_size = (2, 2)
        op = max_pool_2d_same_size(input, patch_size)
        op_output = function([input], op)(test_input_array)
        utt.assert_allclose(op_output, test_answer_array)

        def mp(input):
            return max_pool_2d_same_size(input, patch_size)
        utt.verify_grad(mp, [test_input_array], rng=rng) 
Example #11
Source File: convolutional_nn.py    From Projects with MIT License
def __init__(self,convolutional_layers,feature_maps,filter_shapes,poolsize,feedforward_layers,feedforward_nodes,classes,learning_rate,regularization):
        self.input = T.tensor4()
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1],poolsize[0]))
        for i in range(1,convolutional_layers):
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],poolsize[i]))
        self.feedforward_layers = []
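        # Caution: `flattened` below is not defined in this excerpt; it should
        # hold the flattened output size of the last convolutional layer.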
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),flattened,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        for i in range(convolutional_layers+feedforward_layers+1):
            self.cost += regularization*(self.params[2*i]**2).mean()
        self.gparams = [T.grad(self.cost, param) for param in self.params]
        self.propogate = theano.function([self.input,self.target],self.cost,updates=[(param,param-learning_rate*gparam) for param,gparam in zip(self.params,self.gparams)],allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True) 
Example #12
Source File: convnet.py    From Projects with MIT License
def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
        self.input = T.tensor4()        
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
        for i in range(1,convolutional_layers):
            if i==2 or i==4:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
            else:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
        self.feedforward_layers = []
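        # 20480 below is the hard-coded flattened size of the final conv
        # layer's output, specific to this architecture's input dimensions.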
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),20480,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        self.updates = self.adam(self.cost, self.params)
        self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True) 
Example #13
Source File: conv2d_predict.py    From Projects with MIT License
def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
        self.input = T.tensor4()        
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
        for i in range(1,convolutional_layers):
            if i==3:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
            else:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
        self.feedforward_layers = []
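        # 40000 below is the hard-coded flattened size of the final conv
        # layer's output, specific to this architecture's input dimensions.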
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),40000,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        self.updates = self.adam(self.cost, self.params)
        self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True) 
Example #14
Source File: conv_net.py    From Projects with MIT License
def __init__(self,convolutional_layers,feature_maps,filter_shapes,poolsize,feedforward_layers,feedforward_nodes,classes,regularization):
        self.input = T.tensor4()
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1],poolsize[0]))
        for i in range(1,convolutional_layers):
            self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],poolsize[i]))
        self.feedforward_layers = []
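        # Caution: `flattened` below is not defined in this excerpt; it should
        # hold the flattened output size of the last convolutional layer.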
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),flattened,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        for i in range(convolutional_layers+feedforward_layers+1):
            self.cost += regularization*(self.params[2*i]**2).mean()
        self.updates = self.adam(self.cost,self.params)
        self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True) 
Example #15
Source File: bi_lstm_cnn_crf.py    From LasagneNLP with Apache License 2.0
def test():
    energies_var = T.tensor4('energies', dtype=theano.config.floatX)
    targets_var = T.imatrix('targets')
    masks_var = T.matrix('masks', dtype=theano.config.floatX)
    layer_input = lasagne.layers.InputLayer([2, 2, 3, 3], input_var=energies_var)
    out = lasagne.layers.get_output(layer_input)
    loss = crf_loss(out, targets_var, masks_var)
    prediction, acc = crf_accuracy(energies_var, targets_var)

    fn = theano.function([energies_var, targets_var, masks_var], [loss, prediction, acc])

    energies = np.array([[[[10, 15, 20], [5, 10, 15], [3, 2, 0]], [[5, 10, 1], [5, 10, 1], [5, 10, 1]]],
                         [[[5, 6, 7], [2, 3, 4], [2, 1, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], dtype=np.float32)

    targets = np.array([[0, 1], [0, 2]], dtype=np.int32)

    masks = np.array([[1, 1], [1, 0]], dtype=np.float32)

    l, p, a = fn(energies, targets, masks)
    print(l)
    print(p)
    print(a) 
Example #16
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_channelwise_locally_connected2d(x_shape, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_x, x_shape[0], filter_size=filter_size, channelwise=True,
                                        stride=1, pad='same', flip_filters=flip_filters,
                                        untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = channelwise_locally_connected2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop channelwise locally connected time for x_shape=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-7) 
Example #17
Source File: unet_trainer.py    From luna16 with BSD 2-Clause "Simplified" License
def __init__(self):
        metric_names = ['Loss','L2','Accuracy','Dice']
        super(UNetTrainer, self).__init__(metric_names)

        input_var = T.tensor4('inputs')
        target_var = T.tensor4('targets', dtype='int64')
        weight_var = T.tensor4('weights')


        logging.info("Defining network")
        net_dict = unet.define_network(input_var)
        self.network = net_dict['out']
        train_fn, val_fn, l_r = unet.define_updates(self.network, input_var, target_var, weight_var)

        self.train_fn = train_fn
        self.val_fn = val_fn
        self.l_r = l_r 
Example #18
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_locally_connected2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.LocallyConnected2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                                        flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("locally connected time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = locally_connected2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop locally connected time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #19
Source File: test_layers_theano.py    From visual_dynamics with MIT License
def test_conv2d(x_shape, num_filters, filter_size, flip_filters, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = L.Conv2DLayer(l_x, num_filters, filter_size=filter_size, stride=1, pad='same',
                           flip_filters=flip_filters, untie_biases=True, nonlinearity=None, b=None)
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    tic()
    loop_conv = conv2d(X, l_conv.W.get_value(), flip_filters=flip_filters)
    toc("loop conv time for x_shape=%r, num_filters=%r, filter_size=%r, flip_filters=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, filter_size, flip_filters, batch_size))

    assert np.allclose(conv, loop_conv, atol=1e-6) 
Example #20
Source File: extensions.py    From Diffusion-Probabilistic-Models with MIT License
def __init__(self, model, algorithm, X, path, n_samples=49, **kwargs):
        """
        Generate samples from the model. The do() function is called as an extension during training.
        Generates 3 types of samples:
        - Sample from generative model
        - Sample from image denoising posterior distribution (default signal-to-noise ratio of 1)
        - Sample from image inpainting posterior distribution (inpaint left half of image)
        """

        super(PlotSamples, self).__init__(**kwargs)
        self.model = model
        self.path = path
        n_samples = np.min([n_samples, X.shape[0]])
        self.X = X[:n_samples].reshape(
            (n_samples, model.n_colors, model.spatial_width, model.spatial_width))
        self.n_samples = n_samples
        X_noisy = T.tensor4('X noisy samp', dtype=theano.config.floatX)
        t = T.matrix('t samp', dtype=theano.config.floatX)
        self.get_mu_sigma = theano.function([X_noisy, t], model.get_mu_sigma(X_noisy, t),
            allow_input_downcast=True) 
Example #21
Source File: test_pool.py    From D-VAE with MIT License
def test_max_pool_2d_2D_same_size(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        test_input_array = numpy.array([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.]
        ]]]).astype(theano.config.floatX)
        test_answer_array = numpy.array([[[
            [0., 0., 0., 0.],
            [0., 6., 0., 8.]
        ]]]).astype(theano.config.floatX)
        input = tensor.tensor4(name='input')
        patch_size = (2, 2)
        op = max_pool_2d_same_size(input, patch_size)
        op_output = function([input], op)(test_input_array)
        utt.assert_allclose(op_output, test_answer_array)

        def mp(input):
            return max_pool_2d_same_size(input, patch_size)
        utt.verify_grad(mp, [test_input_array], rng=rng) 
Example #22
Source File: test_conv.py    From D-VAE with MIT License
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius+1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5*filter_1d**2/sigma**2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma) 
Example #23
Source File: test_conv.py    From attention-lvcsr with MIT License
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius+1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5*filter_1d**2/sigma**2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma) 
Example #24
Source File: conv2d_crossvalidation.py    From Projects with MIT License
def __init__(self,convolutional_layers,feature_maps,filter_shapes,feedforward_layers,feedforward_nodes,classes):
        self.input = T.tensor4()        
        self.convolutional_layers = []
        self.convolutional_layers.append(convolutional_layer(self.input,feature_maps[1],feature_maps[0],filter_shapes[0][0],filter_shapes[0][1]))
        for i in range(1,convolutional_layers):
            if i==3:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1],maxpool=(2,2)))
            else:
                self.convolutional_layers.append(convolutional_layer(self.convolutional_layers[i-1].output,feature_maps[i+1],feature_maps[i],filter_shapes[i][0],filter_shapes[i][1]))
        self.feedforward_layers = []
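        # 40000 below is the hard-coded flattened size of the final conv
        # layer's output, specific to this architecture's input dimensions.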
        self.feedforward_layers.append(feedforward_layer(self.convolutional_layers[-1].output.flatten(2),40000,feedforward_nodes[0]))
        for i in range(1,feedforward_layers):
            self.feedforward_layers.append(feedforward_layer(self.feedforward_layers[i-1].output,feedforward_nodes[i-1],feedforward_nodes[i]))
        self.output_layer = feedforward_layer(self.feedforward_layers[-1].output,feedforward_nodes[-1],classes)
        self.params = []
        for l in self.convolutional_layers + self.feedforward_layers:
            self.params.extend(l.get_params())
        self.params.extend(self.output_layer.get_params())
        self.target = T.matrix()
        self.output = self.output_layer.output
        self.cost = -self.target*T.log(self.output)-(1-self.target)*T.log(1-self.output)
        self.cost = self.cost.mean()
        self.updates = self.adam(self.cost, self.params)
        self.propogate = theano.function([self.input,self.target],self.cost,updates=self.updates,allow_input_downcast=True)
        self.classify = theano.function([self.input],self.output,allow_input_downcast=True) 
Example #25
Source File: base.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def __init__(self, normalization_size,
                 normalization_factor,
                 normalization_exponent,
                 axis='channels', input_type=T.tensor4):
        self.normalization_size = normalization_size
        self.normalization_factor = normalization_factor
        self.normalization_exponent = normalization_exponent
        self.axis = axis
        self.input_type = input_type

        self._build_expression() 
Example #26
Source File: base.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def __init__(self, input_type=T.tensor4):
        self.input_type = input_type

        self._build_expression() 
Example #27
Source File: servoing_policy.py    From visual_dynamics with MIT License
def __init__(self, *args, **kwargs):
        super(TheanoServoingPolicy, self).__init__(*args, **kwargs)
        self.jac_fn = None
        self.jac_z_fn = None
        self.A_b_c_split2_fn = None
        self.phi2_fn = None
        self.pi2_fn = None
        self.A_b_c_split_fn = None
        self.phi_fn = None
        self.pi_fn = None
        X_var, U_var = self.predictor.input_vars
        X_target_var = T.tensor4('x_target')
        U_lin_var = T.matrix('u_lin')
        alpha_var = theano.tensor.scalar(name='alpha')
        self.input_vars = [X_var, U_var, X_target_var, U_lin_var, alpha_var]
        w_var = T.vector('w')
        lambda_var = T.vector('lambda')
        self.param_vars = [w_var, lambda_var]
        if len(self.repeats) <= 256 * 3:
            self.max_batch_size = 100
        else:
            self.max_batch_size = (100 * 256 * 3) // len(self.repeats)
            print("Using maximum batch size of %d" % self.max_batch_size)
        # if len(self.repeats) <= 128 * 3:
        #     self.max_batch_size = 100
        # else:
        #     self.max_batch_size = (100 * 128 * 3) // len(self.repeats)
        #     print("Using maximum batch size of %d" % self.max_batch_size)
        assert self.max_batch_size > 0 
Example #28
Source File: test_conv.py    From attention-lvcsr with MIT License
def setUp(self):
        super(TestConv2D, self).setUp()
        self.input = T.tensor4('input', dtype=self.dtype)
        self.input.name = 'default_V'
        self.filters = T.tensor4('filters', dtype=self.dtype)
        self.filters.name = 'default_filters'
        if not conv.imported_scipy_signal and theano.config.cxx == "":
            raise SkipTest("conv2d tests need SciPy or a c++ compiler") 
Example #29
Source File: test_lasagne.py    From docker-python with Apache License 2.0
def test_network_definition(self):
        input_var = T.tensor4('X')

        network = lasagne.layers.InputLayer((None, 3, 32, 32), input_var)
        network = lasagne.layers.Conv2DLayer(network, 64, (3, 3))

        params = lasagne.layers.get_all_params(network, trainable=True)

        self.assertEqual(2, len(params)) 
Example #30
Source File: base.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def fuse(building_blocks, fuse_dim=4, input_variables=None, entry_expression=None,
         output_expressions=-1, input_dtype='float32'):

    num_blocks = len(building_blocks)

    if isinstance(output_expressions, numbers.Number):
        output_expressions = [output_expressions]

    # account for indices -1, -2 etc
    output_expressions = [oe % num_blocks for oe in output_expressions]

    if fuse_dim == 4:
        fuse_block = T.tensor4
    else:
        fuse_block = T.matrix

    if input_variables is None and entry_expression is None:
        input_variables = fuse_block(dtype=input_dtype)
        entry_expression = input_variables

    current_expression = entry_expression
    outputs = []

    for i, block in enumerate(building_blocks):
        if not hasattr(block, "expression_"):
            block._build_expression()
        current_expression = theano.clone(
            block.expression_,
            replace={block.input_: current_expression},
            strict=False)
        if i in output_expressions:
            outputs.append(current_expression)

    return outputs, input_variables
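
As a usage illustration (a hypothetical sketch, not part of sklearn-theano itself), any object exposing input_, expression_, and a _build_expression() method can serve as a building block for fuse(); the ScaleBlock class below is an assumed stand-in:

import theano
import theano.tensor as T

class ScaleBlock(object):
    """Hypothetical block with the attributes fuse() expects."""
    def __init__(self, factor):
        self.factor = factor

    def _build_expression(self):
        self.input_ = T.tensor4(dtype='float32')
        self.expression_ = self.input_ * self.factor

# Chain two blocks; fuse() rewires each block's input to the previous output.
outputs, input_var = fuse([ScaleBlock(2.), ScaleBlock(3.)])
f = theano.function([input_var], outputs[-1])  # computes x * 2 * 3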