Python theano.tensor.signal.downsample.max_pool_2d() Examples

The following are 22 code examples of theano.tensor.signal.downsample.max_pool_2d(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module theano.tensor.signal.downsample, or try the search function.
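Note that this module was deprecated in later Theano releases in favour of theano.tensor.signal.pool.pool_2d; all examples below use the older downsample API. As a quick orientation, here is a minimal, self-contained usage sketch (shapes chosen purely for illustration): max_pool_2d pools over the last two axes of its input.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.tensor4("x")                                 # (batch, channels, rows, cols)
y = max_pool_2d(x, ds=(2, 2), ignore_border=True)  # 2x2 max pooling
f = theano.function([x], y)
print(f(np.random.randn(1, 1, 4, 4).astype("float32")).shape)  # (1, 1, 2, 2)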
Example #1
Source File: 5_convolutional_net.py    From Theano-Tutorials with MIT License
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx 
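The snippet assumes helpers defined elsewhere in the tutorial (conv2d, rectify, softmax, dropout). As a rough sketch of what those helpers look like in that codebase (the exact definitions live in the tutorial source; srng is a shared RandomStreams instance created at module level):

import theano
import theano.tensor as T
from theano.tensor.nnet.conv import conv2d
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

srng = RandomStreams()

def rectify(X):
    # ReLU nonlinearity
    return T.maximum(X, 0.)

def softmax(X):
    # numerically stable row-wise softmax
    e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))
    return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')

def dropout(X, p=0.):
    # inverted dropout: scale at train time so no rescaling is needed at test time
    if p > 0:
        retain_prob = 1 - p
        X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
        X /= retain_prob
    return X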
Example #2
Source File: downsample.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        out_shape = in_vw.shape[:2]
        pool_size = in_vw.shape[2:]
        pooled = max_pool_2d(in_vw.variable,
                             ds=pool_size,
                             mode=mode,
                             # doesn't make a difference here,
                             # but allows using cuDNN
                             ignore_border=True)
        out_var = pooled.flatten(2)
        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
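This node performs global max pooling: because pool_size is set to the full spatial extent of the input, each feature map collapses to a single value, and flatten(2) yields a (batch, channels) matrix. A minimal standalone sketch of the same trick, with assumed concrete shapes:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.tensor4("x")                                      # (batch, channels, rows, cols)
pooled = max_pool_2d(x, ds=(5, 5), ignore_border=True)  # ds = full 5x5 spatial extent
out = pooled.flatten(2)                                 # (batch, channels, 1, 1) -> (batch, channels)
f = theano.function([x], out)
print(f(np.random.randn(2, 3, 5, 5).astype("float32")).shape)  # (2, 3)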
Example #3
Source File: lrn.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def local_response_normalization_pool(in_vw, alpha, k, beta, n):
    """
    using built-in pooling, works for N-D tensors (2D/3D/etc.)
    """
    from theano.tensor.signal.downsample import max_pool_2d
    assert n % 2 == 1, "n must be odd"
    in_var = in_vw.variable
    batch_size, num_channels = in_vw.symbolic_shape()[:2]
    squared = T.sqr(in_var)
    reshaped = squared.reshape((batch_size, 1, num_channels, -1))
    pooled = max_pool_2d(input=reshaped,
                         ds=(n, 1),
                         st=(1, 1),
                         padding=(n // 2, 0),
                         ignore_border=True,
                         mode="average_inc_pad")
    unreshaped = pooled.reshape(in_vw.symbolic_shape())
    # multiply by n, since we did a mean pool instead of a sum pool
    return in_var / (((alpha * n) * unreshaped + k) ** beta) 
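The function pools across channels by reshaping: the squared input is laid out as (batch, 1, channels, pixels), so average pooling with an (n, 1) window and stride (1, 1) computes a local mean over each channel neighbourhood, which multiplying by n turns into the local sum the LRN formula needs. A standalone sketch of that reshape trick (variable names chosen for illustration):

import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

n = 5                     # local size; must be odd so the (n // 2) padding centres the window
x = T.tensor4("x")        # (batch, channels, rows, cols)
b, ch, r, c = x.shape
sq = T.sqr(x).reshape((b, 1, ch, r * c))      # channels become a poolable axis
avg = max_pool_2d(input=sq, ds=(n, 1), st=(1, 1),
                  padding=(n // 2, 0), ignore_border=True,
                  mode="average_inc_pad")
local_sum_sq = (n * avg).reshape(x.shape)     # mean pool * n = sum pool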
Example #4
Source File: 05_convolutional_network.py    From computer-vision-resources with MIT License
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx 
Example #5
Source File: conv_net_classes.py    From personality-detection with MIT License
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear == "tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        elif self.non_linear == "relu":
            # note: elif, so the linear else branch cannot overwrite the tanh output
            conv_out_relu = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_relu, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Example #6
Source File: layers.py    From kaggle-galaxies with BSD 3-Clause "New" or "Revised" License
def output(self, *args, **kwargs):
        input = self.input_layer.output(*args, **kwargs)

        if self.implementation == 'max_pool':
            # max_pool_2d operates on the last 2 dimensions of the input. So shift the feature dim to be last.
            shuffle_order = range(0, self.feature_dim) + range(self.feature_dim + 1, input.ndim) + [self.feature_dim]
            unshuffle_order = range(0, self.feature_dim) + [input.ndim - 1] + range(self.feature_dim, input.ndim - 1)

            input_shuffled = input.dimshuffle(*shuffle_order)
            output_shuffled = max_pool_2d(input_shuffled, (1, self.pool_size))
            output = output_shuffled.dimshuffle(*unshuffle_order)

        elif self.implementation == 'reshape':
            out_feature_dim_size = self.get_output_shape()[self.feature_dim]
            pool_shape = self.input_shape[:self.feature_dim] + (out_feature_dim_size, self.pool_size) + self.input_shape[self.feature_dim + 1:]
            
            input_reshaped = input.reshape(pool_shape)
            output = T.max(input_reshaped, axis=self.feature_dim + 1)
        else:
            raise ValueError("Unknown implementation string '%s'" % self.implementation)

        return output 
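The 'reshape' path above implements feature pooling (as in maxout) without max_pool_2d: the feature axis is split into (out_features, pool_size) and T.max reduces over the new pool axis. A standalone sketch with assumed concrete shapes:

import numpy as np
import theano
import theano.tensor as T

pool_size = 2
b, ch, r, c = 4, 8, 5, 5     # assumed bc01 shapes; ch must be divisible by pool_size
x = T.tensor4("x")
reshaped = x.reshape((b, ch // pool_size, pool_size, r, c))
out = T.max(reshaped, axis=2)   # max over the pool axis
f = theano.function([x], out)
print(f(np.random.randn(b, ch, r, c).astype("float32")).shape)  # (4, 4, 5, 5)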
Example #7
Source File: layers.py    From kaggle-galaxies with BSD 3-Clause "New" or "Revised" License
def __init__(self, input_layer, pool_size, feature_dim=1, implementation='max_pool'):
        """
        pool_size: the number of inputs to be pooled together.

        feature_dim: the dimension of the input to pool across. By default this is 1
        for both dense and convolutional layers (bc01).
        For c01b, this has to be set to 0.

        implementation:
            - 'max_pool': uses theano's max_pool_2d - doesn't work for input dimension > 1024!
            - 'reshape': reshapes the tensor to create a 'pool' dimension and then uses T.max.
        """
        self.pool_size = pool_size
        self.feature_dim = feature_dim
        self.implementation = implementation
        self.input_layer = input_layer
        self.input_shape = self.input_layer.get_output_shape()
        self.mb_size = self.input_layer.mb_size

        if self.input_shape[self.feature_dim] % self.pool_size != 0:
            raise "Feature dimension is not a multiple of the pool size. Doesn't work!"

        self.params = []
        self.bias_params = [] 
Example #8
Source File: conv_net_classes.py    From deeplearning4nlp-tutorial with Apache License 2.0
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear == "tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        elif self.non_linear == "relu":
            # note: elif, so the linear else branch cannot overwrite the tanh output
            conv_out_relu = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_relu, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Example #9
Source File: nn_classes.py    From optimus with Apache License 2.0
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear == "tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        elif self.non_linear == "relu":
            # note: elif, so the linear else branch cannot overwrite the tanh output
            conv_out_relu = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_relu, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Example #10
Source File: downsample.py    From treeano with Apache License 2.0
def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        out_shape = in_vw.shape[:2]
        pool_size = in_vw.shape[2:]
        pooled = max_pool_2d(in_vw.variable,
                             ds=pool_size,
                             mode=mode,
                             # doesn't make a difference here,
                             # but allows using cuDNN
                             ignore_border=True)
        out_var = pooled.flatten(2)
        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
Example #11
Source File: lrn.py    From treeano with Apache License 2.0
def local_response_normalization_2d_pool(in_vw, alpha, k, beta, n):
    """
    using built-in pooling
    """
    from theano.tensor.signal.downsample import max_pool_2d
    assert n % 2 == 1, "n must be odd"
    in_var = in_vw.variable
    b, ch, r, c = in_vw.symbolic_shape()
    squared = T.sqr(in_var)
    reshaped = squared.reshape((b, 1, ch, r * c))
    pooled = max_pool_2d(input=reshaped,
                         ds=(n, 1),
                         st=(1, 1),
                         padding=(n // 2, 0),
                         ignore_border=True,
                         mode="average_inc_pad")
    unreshaped = pooled.reshape((b, ch, r, c))
    # multiply by n, since we did a mean pool instead of a sum pool
    return in_var / (((alpha * n) * unreshaped + k) ** beta) 
Example #12
Source File: lrn.py    From treeano with Apache License 2.0
def local_response_normalization_pool(in_vw, alpha, k, beta, n):
    """
    using built-in pooling, works for N-D tensors (2D/3D/etc.)
    """
    from theano.tensor.signal.downsample import max_pool_2d
    assert n % 2 == 1, "n must be odd"
    in_var = in_vw.variable
    batch_size, num_channels = in_vw.symbolic_shape()[:2]
    squared = T.sqr(in_var)
    reshaped = squared.reshape((batch_size, 1, num_channels, -1))
    pooled = max_pool_2d(input=reshaped,
                         ds=(n, 1),
                         st=(1, 1),
                         padding=(n // 2, 0),
                         ignore_border=True,
                         mode="average_inc_pad")
    unreshaped = pooled.reshape(in_vw.symbolic_shape())
    # multiply by n, since we did a mean pool instead of a sum pool
    return in_var / (((alpha * n) * unreshaped + k) ** beta) 
Example #13
Source File: downsample.py    From treeano with Apache License 2.0
def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        pool_size = network.find_hyperparameter(["pool_size"])
        stride = network.find_hyperparameter(["pool_stride",
                                              "stride"],
                                             None)
        pad = network.find_hyperparameter(["pool_pad", "pad"], (0, 0))
        ignore_border = network.find_hyperparameter(["ignore_border"],
                                                    True)
        if ((stride is not None)
                and (stride != pool_size)
                and (not ignore_border)):
            # as of 20150813
            # for more information, see:
            # https://groups.google.com/forum/#!topic/lasagne-users/t_rMTLAtpZo
            msg = ("Setting stride not equal to pool size and not ignoring"
                   " border results in using a slower (cpu-based)"
                   " implementation")
            # making this an assertion instead of a warning so that it
            # cannot be silently ignored
            assert False, msg

        out_shape = pool_output_shape(
            input_shape=in_vw.shape,
            axes=(2, 3),
            pool_shape=pool_size,
            strides=stride,
            pads=pad)
        out_var = max_pool_2d(input=in_vw.variable,
                              ds=pool_size,
                              st=stride,
                              ignore_border=ignore_border,
                              padding=pad,
                              mode=mode)

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
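pool_output_shape is a treeano helper; for the ignore_border=True case that this node effectively enforces, the per-axis arithmetic should match the standard pooling formula. A minimal sketch (my own restatement of that formula, not the library's code), checked against the shapes used in the downsample tests below:

def pooled_length(in_len, pool, stride, pad):
    # output length per spatial axis with ignore_border=True:
    # floor((in + 2*pad - pool) / stride) + 1
    return (in_len + 2 * pad - pool) // stride + 1

# e.g. a 5x6 map with a (2, 3) pool and (2, 2) strides, no padding -> (2, 2)
assert pooled_length(5, 2, 2, 0) == 2
assert pooled_length(6, 3, 2, 0) == 2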
Example #14
Source File: conv_layer.py    From kaggle-seizure-prediction with MIT License
def __init__(self, rng, input, filter_shape, image_shape, poolsize, activation, weights_variance, subsample):

        assert image_shape[1] == filter_shape[1]
        self.input = input

        if activation == 'tanh':
            activation_function = lambda x: T.tanh(x)
            fan_in = np.prod(filter_shape[1:])
            fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
            W_bound = np.sqrt(6. / (fan_in + fan_out))
            W_values = np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape), dtype='float32')
            b_values = np.zeros((filter_shape[0],), dtype='float32')

        elif activation == 'relu':
            activation_function = lambda x: T.maximum(0.0, x)
            W_values = np.asarray(rng.normal(0.0, weights_variance, size=filter_shape), dtype='float32')
            b_values = np.ones((filter_shape[0],), dtype='float32') / 10.0
        else:
            raise ValueError('unknown activation function')

        self.W = theano.shared(value=W_values, name='W', borrow=True)
        self.b = theano.shared(value=b_values, name='b', borrow=True)

        conv_out = conv.conv2d(input, self.W, filter_shape=filter_shape, image_shape=image_shape, subsample=subsample)
        pooled_out = downsample.max_pool_2d(conv_out, poolsize, ignore_border=True) if poolsize[1] > 1 else conv_out
        self.output = activation_function(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.weights = [self.W, self.b] 
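For the tanh branch, the uniform initialisation bound is the classic Glorot formula with fan_out reduced by the pooling factor. A worked example with assumed shapes:

import numpy as np

filter_shape = (32, 1, 5, 5)  # assumed: 32 filters, 1 input channel, 5x5 kernels
poolsize = (2, 2)
fan_in = np.prod(filter_shape[1:])                                          # 1 * 5 * 5 = 25
fan_out = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize)   # 32 * 25 / 4 = 200
W_bound = np.sqrt(6. / (fan_in + fan_out))                                  # sqrt(6 / 225) ~= 0.163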
Example #15
Source File: layers.py    From RATM with MIT License
def forward(self, inputs):
        return max_pool_2d(
            input=inputs,
            ds=self.pool_size,
            ignore_border=self.ignore_border,
            st=self.stride
        ) 
Example #16
Source File: downsample_test.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def test_pool_output_shape_2d():
    def test_same(input_shape, local_sizes, strides, pads, ignore_border):
        res = tn.downsample.pool_output_shape(
            input_shape,
            (2, 3),
            local_sizes,
            strides,
            pads,
            ignore_border,
        )
        from theano.tensor.signal.downsample import max_pool_2d
        ans = max_pool_2d(
            T.constant(np.random.randn(*input_shape).astype(fX)),
            ds=local_sizes,
            st=strides,
            ignore_border=ignore_border,
            padding=pads,
        ).shape.eval()
        print(ans, res)
        np.testing.assert_equal(ans, res)

    # tests w/ ignore border
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 0), True)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (0, 0), True)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (0, 0), True)
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 1), True)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (1, 0), True)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (1, 1), True)

    # tests w/o ignore border, and stride <= pool_size
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 0), False)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (0, 0), False)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (0, 0), False)

    # tests w/o ignore border, and stride > pool_size
    test_same((1, 1, 5, 6), (2, 3), (3, 3), (0, 0), False)
    test_same((1, 1, 5, 6), (2, 3), (3, 3), (0, 0), False)
    test_same((1, 1, 1, 1), (2, 3), (3, 3), (0, 0), False) 
Example #17
Source File: convolutions.py    From fancy-cnn with MIT License
def get_output(self, train):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        output = downsample.max_pool_2d(Y, ds=self.pool_size, st=self.stride, ignore_border=self.ignore_border)
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape) #shape is (num_samples, num_timesteps, stack_size, new_nb_row, new_nb_col) 
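The layer pools each timestep of a 5D (num_samples, num_timesteps, stack_size, rows, cols) batch by collapsing the first two axes into one, pooling in 4D, and reshaping back. A standalone sketch of that collapse-and-restore pattern with assumed shapes:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.TensorType("float32", (False,) * 5)("x")          # 5D input
xs = x.shape
y = T.reshape(x, (xs[0] * xs[1], xs[2], xs[3], xs[4]))  # merge samples and timesteps
p = max_pool_2d(y, ds=(2, 2), ignore_border=True)
out = T.reshape(p, (xs[0], xs[1], p.shape[1], p.shape[2], p.shape[3]))
f = theano.function([x], out)
print(f(np.random.randn(2, 3, 4, 6, 6).astype("float32")).shape)  # (2, 3, 4, 3, 3)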
Example #18
Source File: convolutional.py    From LSTM_Anomaly_Detector with MIT License
def get_output(self, train):
        X = self.get_input(train)
        output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border)
        return output 
Example #19
Source File: layers.py    From anna with BSD 2-Clause "Simplified" License
def output(self, *args, **kwargs):
        input = self.input_layer.output(*args, **kwargs)
        return max_pool_2d(input, (1, self.ds_factor), self.ignore_border) 
Example #20
Source File: spp_net.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def compute_output(self, network, in_vw):
        spp_levels = network.find_hyperparameter(["spp_levels"])
        # FIXME generalize to other shape dimensions.
        # assume this is of the form bc01 (batch, channel, width, height)

        # shape calculation
        in_shape = in_vw.symbolic_shape()
        if in_vw.shape[1] is None:
            out_shape1 = None
        else:
            out_shape1 = in_vw.shape[1] * sum(d1 * d2 for d1, d2 in spp_levels)
        out_shape = (in_vw.shape[0], out_shape1)

        # compute out
        mp_kwargs_list = [spp_max_pool_kwargs(in_shape[2:], spp_level)
                          for spp_level in spp_levels]
        pooled = [downsample.max_pool_2d(in_vw.variable, **kwargs)
                  for kwargs in mp_kwargs_list]
        out_var = T.concatenate([p.flatten(2) for p in pooled], axis=1)

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
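spp_max_pool_kwargs is a project-specific helper that, for each pyramid level (d1, d2), picks a window and stride so the feature map is divided into roughly d1 x d2 bins. A plausible sketch of that computation (an assumption following the standard spatial-pyramid-pooling recipe, not the project's exact code):

import math

def spp_max_pool_kwargs_sketch(spatial_shape, spp_level):
    (h, w), (d1, d2) = spatial_shape, spp_level
    # window = ceil(in / bins), stride = floor(in / bins)
    return dict(ds=(int(math.ceil(h / float(d1))), int(math.ceil(w / float(d2)))),
                st=(h // d1, w // d2),
                ignore_border=True)

print(spp_max_pool_kwargs_sketch((13, 13), (4, 4)))
# {'ds': (4, 4), 'st': (3, 3), 'ignore_border': True} -> a 4x4 grid of bins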
Example #21
Source File: downsample.py    From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        pool_size = network.find_hyperparameter(["pool_size"])
        stride = network.find_hyperparameter(["pool_stride",
                                              "stride"],
                                             None)
        pad = network.find_hyperparameter(["pool_pad", "pad"], (0, 0))
        ignore_border = network.find_hyperparameter(["ignore_border"],
                                                    True)
        if ((stride is not None)
                and (stride != pool_size)
                and (not ignore_border)):
            # as of 20150813
            # for more information, see:
            # https://groups.google.com/forum/#!topic/lasagne-users/t_rMTLAtpZo
            msg = ("Setting stride not equal to pool size and not ignoring"
                   " border results in using a slower (cpu-based)"
                   " implementation")
            # making this an assertion instead of a warning so that it
            # cannot be silently ignored
            assert False, msg

        out_shape = pool_output_shape(
            input_shape=in_vw.shape,
            axes=(2, 3),
            pool_shape=pool_size,
            strides=stride,
            pads=pad)
        out_var = max_pool_2d(input=in_vw.variable,
                              ds=pool_size,
                              st=stride,
                              ignore_border=ignore_border,
                              padding=pad,
                              mode=mode)

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
Example #22
Source File: convpool.py    From theanet with Apache License 2.0
def __init__(self, inpt, num_maps, in_sz, pool_sz, ignore_border=False):
        """
        Pool Layer to follow Convolutional Layer
        :param inpt:
        :param pool_sz:
        :param ignore_border: When True, (5,5) input with ds=(2,2)
            will generate a (2,2) output. (3,3) otherwise.
        """
        self.output = downsample.max_pool_2d(inpt, (pool_sz, pool_sz),
                                             ignore_border=ignore_border)

        if ignore_border:
            self.out_sz = in_sz//pool_sz
        else:
            self.out_sz = math.ceil(in_sz/pool_sz)

        self.params = []
        self.inpt = inpt
        self.num_maps = num_maps
        self.ignore_border = ignore_border
        self.args = (num_maps, in_sz, pool_sz, ignore_border)
        self.n_out = num_maps * self.out_sz ** 2
        self.representation = (
            "Pool Maps:{:2d} Pool_sz:{} Border:{} Output:{:2d}"
            "".format(num_maps, pool_sz,
                      "Ignore" if ignore_border else "Keep",
                      self.out_sz))
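A quick check of the out_sz arithmetic in the docstring: for a 5x5 input with pool_sz = 2, ignore_border=True drops the partial window, while ignore_border=False keeps it.

import math

in_sz, pool_sz = 5, 2
print(in_sz // pool_sz)                         # 2: border ignored, floor(5 / 2)
print(int(math.ceil(in_sz / float(pool_sz))))   # 3: partial window kept, ceil(5 / 2)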