Python chainer.Sequential() Examples

The following are 30 code examples of chainer.Sequential(), collected from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions and classes of the chainer module.
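As a quick orientation before the examples, here is a minimal sketch of the class itself (standard chainer API; the layer sizes are arbitrary): chainer.Sequential chains links and plain functions and applies them in order when called.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

# Links and bare functions can be mixed; layers run in insertion order.
model = chainer.Sequential(L.Linear(None, 32), F.relu, L.Linear(32, 10))
y = model(np.zeros((1, 4), dtype=np.float32))  # y.shape == (1, 10)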
Example #1
Source File: test_external_converter.py    From chainer with MIT License
def test_export_external_converters_overwrite(tmpdir, check_model_expect):
    path = str(tmpdir)

    model = chainer.Sequential(chainer.functions.sigmoid)
    x = input_generator.positive_increasing(2, 5)

    def custom_converter(params):
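        # Returning a tuple (note the trailing comma): converters emit one or more ONNX nodes.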
        return onnx_helper.make_node(
            'Tanh', params.input_names, params.output_names),

    addon_converters = {'Sigmoid': custom_converter}
    export_testcase(model, x, path, external_converters=addon_converters)

    tanh_outputs = chainer.functions.tanh(x).array
    output_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
    onnx_helper.write_tensor_pb(output_path, '', tanh_outputs)  # overwrite

    check_model_expect(path) 
Example #2
Source File: test_chainer.py    From optuna with MIT License
def test_chainer_pruning_extension() -> None:
    def objective(trial: optuna.trial.Trial) -> float:

        model = L.Classifier(chainer.Sequential(L.Linear(None, 2)))
        optimizer = chainer.optimizers.Adam()
        optimizer.setup(model)

        train_iter = chainer.iterators.SerialIterator(FixedValueDataset(), 16)
        updater = chainer.training.StandardUpdater(train_iter, optimizer)
        trainer = chainer.training.Trainer(updater, (1, "epoch"))
        trainer.extend(
            optuna.integration.chainer.ChainerPruningExtension(trial, "main/loss", (1, "epoch"))
        )

        trainer.run(show_loop_exception_msg=False)
        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.PRUNED

    study = optuna.create_study(pruner=DeterministicPruner(False))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0 
Example #3
Source File: chainermn_simple.py    From optuna with MIT License
def create_model(trial):
    # We optimize the number of layers and the number of units in each layer.
    n_layers = trial.suggest_int("n_layers", 1, 3)

    layers = []
    for i in range(n_layers):
        n_units = trial.suggest_int("n_units_l{}".format(i), 4, 128, log=True)
        layers.append(L.Linear(None, n_units))
        layers.append(F.relu)
    layers.append(L.Linear(None, 10))

    return chainer.Sequential(*layers)


# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args). 
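As a hedged sketch of the FAQ link above (the extra 'offset' argument and the quadratic objective are hypothetical), extra arguments can be bound with a lambda before calling study.optimize:

import optuna

def objective(trial, offset):
    # 'offset' is the hypothetical additional argument bound below.
    x = trial.suggest_uniform("x", -10, 10)
    return (x - offset) ** 2

study = optuna.create_study()
study.optimize(lambda trial: objective(trial, 2.0), n_trials=10)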
Example #4
Source File: chainer_integration.py    From optuna with MIT License
def create_model(trial):
    # We optimize the number of layers and the number of units in each layer.
    n_layers = trial.suggest_int("n_layers", 1, 3)

    layers = []
    for i in range(n_layers):
        n_units = int(trial.suggest_loguniform("n_units_l{}".format(i), 32, 256))
        layers.append(L.Linear(None, n_units))
        layers.append(F.relu)
    layers.append(L.Linear(None, 10))

    return chainer.Sequential(*layers)


# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args). 
Example #5
Source File: __init__.py    From kiss with GNU General Public License v3.0
def build_decoder(vocab_size, N=6, model_size=512, ff_size=2048, num_heads=8, dropout_ratio=0.1):
    attention = MultiHeadedAttention(num_heads, model_size, dropout_ratio=dropout_ratio)
    feed_forward = PositionwiseFeedForward(model_size, ff_size, dropout_ratio=dropout_ratio)
    positional_encoding = PositionalEncoding(model_size, dropout_ratio=dropout_ratio)

    decoder_layer = DecoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(attention),
        feed_forward,
        dropout_ratio=dropout_ratio
    )

    decoder = Decoder(decoder_layer, N)

    embeddings = Embedding(model_size, vocab_size)

    return chainer.Sequential(embeddings, positional_encoding), decoder 
Example #6
Source File: sequential.py    From chainer with MIT License
def __reduce__(self):
        n_lambda = 0
        for layer in self._layers:
            if callable(layer) and hasattr(layer, '__name__') \
                    and layer.__name__ == '<lambda>':
                n_lambda += 1

        if n_lambda > 0:
            raise ValueError(
                'This Sequential object has at least one lambda function as '
                'its component. Lambda functions cannot be pickled, so please '
                'consider using functools.partial instead of the lambda '
                'function, or use "dill", an external package that enables '
                'pickling objects that contain lambda functions, instead of '
                'the built-in pickle.')
        return super(Sequential, self).__reduce__() 
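A minimal sketch of the workaround the message suggests (standard chainer plus the built-in pickle module): functools.partial of a module-level function pickles where a lambda does not.

import functools
import pickle
import chainer
import chainer.functions as F
import chainer.links as L

# A lambda layer here would make pickling raise ValueError;
# a functools.partial wrapping a module-level function is picklable.
leaky = functools.partial(F.leaky_relu, slope=0.2)
model = chainer.Sequential(L.Linear(None, 10), leaky)
payload = pickle.dumps(model)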
Example #7
Source File: test_create_mnbn_model.py    From chainer with MIT License
def check_create_mnbn_model_sequential(self, use_gpu, use_chx):
        size = 3
        model = chainer.Sequential(
            chainer.links.Convolution2D(
                None, size, 1, 1, 1, nobias=True),
            chainer.links.BatchNormalization(size),
            chainer.functions.relu
        )
        mnbn_model = chainermn.links.create_mnbn_model(model,
                                                       self.communicator)

        device = get_device(self.communicator.intra_rank if use_gpu else None,
                            use_chx)
        mnbn_model.to_device(device)

        with chainer.using_device(mnbn_model.device):
            x = mnbn_model.xp.zeros((1, 1, 1, 1))
            mnbn_model(x) 
Example #8
Source File: test_sequential.py    From chainer with MIT License
def test_str(self):
        self.assertEqual(str(chainer.Sequential()), 'Sequential()')

        expected = '''\
  (0): Sequential(
    (0): Linear(in_size=None, out_size=3, nobias=False),
    (1): Linear(in_size=3, out_size=2, nobias=False),
  ),
  (1): Linear(in_size=2, out_size=3, nobias=False),
  (2): lambda x: functions.leaky_relu(x, slope=0.2),
'''
        layers = [
            self.s1,
            self.l3,
            lambda x: functions.leaky_relu(x, slope=0.2),
        ]
        if six.PY3:
            # In Python 2, this fails because the id of the function differs.
            layer = functools.partial(functions.leaky_relu, slope=0.2)
            layers.append(layer)
            expected += '  (3): %s,\n' % layer
        expected = 'Sequential(\n%s)' % expected
        s = chainer.Sequential(*layers)
        self.assertEqual(str(s), expected) 
Example #9
Source File: test_sequential.py    From chainer with MIT License
def test_serialize(self):
        l1 = links.Linear(None, 1)
        l2 = links.Linear(None, 3)
        with l2.init_scope():
            l2.x = variable.Parameter(0, 2)
        s1 = chainer.Sequential(l1, l2)
        mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
        serializer = mock.MagicMock()
        serializer.__getitem__.side_effect = lambda k: mocks[k]
        serializer.return_value = None
        mocks['0'].return_value = None
        mocks['1'].return_value = None
        s1.serialize(serializer)

        self.assertEqual(serializer.call_count, 0)
        self.assertEqual(serializer.__getitem__.call_count, 2)
        serializer.__getitem__.assert_any_call('0')
        serializer.__getitem__.assert_any_call('1')

        mocks['0'].assert_any_call('W', None)
        mocks['0'].assert_any_call('b', l1.b.data)
        mocks['1'].assert_any_call('W', None)
        mocks['1'].assert_any_call('b', l2.b.data)
        mocks['1'].assert_any_call('x', l2.x.data) 
Example #10
Source File: test_sequential.py    From chainer with MIT License
def test_copyparams(self):
        l1 = links.Linear(None, 3)
        l2 = links.Linear(3, 2)
        l3 = links.Linear(2, 3)
        s1 = chainer.Sequential(l1, l2)
        s2 = chainer.Sequential(s1, l3)
        l1.b.data.fill(0)
        l2.W.data.fill(1)
        l2.b.data.fill(2)
        l3.W.data.fill(3)
        l3.b.data.fill(4)

        self.s2.copyparams(s2)

        numpy.testing.assert_array_equal(self.l1.b.data, l1.b.data)
        numpy.testing.assert_array_equal(self.l2.W.data, l2.W.data)
        numpy.testing.assert_array_equal(self.l2.b.data, l2.b.data)
        numpy.testing.assert_array_equal(self.l3.W.data, l3.W.data)
        numpy.testing.assert_array_equal(self.l3.b.data, l3.b.data) 
Example #11
Source File: test_branched.py    From chainerrl with MIT License
def test_manual(self):
        link1 = L.Linear(2, 3)
        link2 = L.Linear(2, 5)
        link3 = chainer.Sequential(
            L.Linear(2, 7),
            F.tanh,
        )
        plink = Branched(link1, link2, link3)
        x = np.zeros((self.batch_size, 2), dtype=np.float32)
        pout = plink(x)
        self.assertIsInstance(pout, tuple)
        self.assertEqual(len(pout), 3)
        out1 = link1(x)
        out2 = link2(x)
        out3 = link3(x)
        np.testing.assert_allclose(pout[0].array, out1.array)
        np.testing.assert_allclose(pout[1].array, out2.array)
        np.testing.assert_allclose(pout[2].array, out3.array) 
Example #12
Source File: stateless_recurrent.py    From chainerrl with MIT License
def n_step_forward(self, x, recurrent_state, output_mode):
        """Multi-step batch forward computation.

        This method sequentially applies layers as chainer.Sequential does.

        Args:
            x (list): Input sequences. Each sequence should be a variable whose
                first axis corresponds to time or a tuple of such variables.
            recurrent_state (object): Batched recurrent state. If set to None,
                it is initialized.
            output_mode (str): If set to 'concat', the output value is
                concatenated into a single large batch, which can be suitable
                for loss computation. If set to 'split', the output value is
                a list of output sequences.

        Returns:
            object: Output sequences. See the description of the `output_mode`
                argument.
            object: New batched recurrent state.
        """
        raise NotImplementedError 
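A hedged usage sketch, assuming chainerrl's StatelessRecurrentSequential (a concrete subclass that implements this method; the layer sizes and sequence lengths here are arbitrary):

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainerrl.links import StatelessRecurrentSequential

# NStepLSTM(n_layers, in_size, out_size, dropout) consumes whole sequences.
net = StatelessRecurrentSequential(L.NStepLSTM(1, 4, 8, 0), F.relu)
xs = [np.zeros((t, 4), dtype=np.float32) for t in (3, 5)]  # two sequences
ys, state = net.n_step_forward(xs, None, output_mode='concat')
# With 'concat', ys stacks all timesteps into one batch: ys.shape == (8, 8)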
Example #13
Source File: sequential.py    From chainer with MIT License
def __add__(self, other):
        if isinstance(other, Sequential):
            ret = Sequential()
            for layer in self:
                ret.append(layer)
            for layer in other:
                ret.append(layer)
            return ret
        else:
            raise ValueError('add (+) operator supports only objects of '
                             'Sequential class, but {} is given.'.format(
                                 str(type(other)))) 
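A minimal usage sketch of this operator (standard chainer layers):

import chainer
import chainer.functions as F
import chainer.links as L

head = chainer.Sequential(L.Linear(None, 32), F.relu)
tail = chainer.Sequential(L.Linear(32, 10))
model = head + tail  # a new Sequential holding all three layers
assert len(model) == 3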
Example #14
Source File: glance.py    From chainer with MIT License
def MLP(n_units, n_out):
    layer = ch.Sequential(L.Linear(n_units), F.relu)
    model = layer.repeat(2)
    model.append(L.Linear(n_out))

    return model 
Example #15
Source File: sequential.py    From chainer with MIT License
def __iadd__(self, other):
        if isinstance(other, Sequential):
            for layer in other:
                self.append(layer)
        else:
            raise ValueError('in-place add (+=) operator supports only '
                             'objects of Sequential class, but {} is given.'
                             .format(str(type(other))))
        return self 
Example #16
Source File: sequential.py    From chainer with MIT License
def remove_by_layer_type(self, type_name):
        """Remove layers by layer type.

        This method removes layers from the Sequential object by the
        layer's class name or function name. If you want to remove a
        :class:`~Link`, the argument ``type_name`` should be its class name,
        e.g., :class:`~links.Linear` or :class:`~links.Convolution2D`, etc.
        If you want to remove a :class:`~Function` class or any other callable
        objects, ``type_name`` should be the function name, e.g., ``relu`` or
        ``reshape``, etc.

        Args:
            type_name (str): The name of a layer you want to remove.

        """

        names = []
        for layer in self:
            if isinstance(layer, _link.Link):
                name = layer.__class__.__name__
            else:
                name = layer.__name__
            names.append((name, layer))
        for _name, _layer in names:
            if type_name == _name:
                self.remove(_layer) 
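A minimal usage sketch (standard chainer layers): removal matches the class name for links and the function name for bare functions.

import chainer
import chainer.functions as F
import chainer.links as L

model = chainer.Sequential(L.Linear(None, 10), F.relu, L.Linear(10, 5))
model.remove_by_layer_type('Linear')  # removes both Linear links
assert len(model) == 1                # only relu remains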
Example #17
Source File: sequential.py    From chainer with MIT License
def copy(self, mode='share'):
        ret = Sequential()
        for layer in self:
            if isinstance(layer, _link.Link):
                ret.append(layer.copy(mode))
            else:
                ret.append(copy.copy(layer))
        return ret 
Example #18
Source File: sequential.py    From chainer with MIT License
def copyparams(self, link, copy_persistent=True):
        if not isinstance(link, Sequential):
            raise ValueError('Only a Sequential object can be copied to '
                             'another Sequential object.')
        for idx, child in enumerate(self):
            if isinstance(child, _link.Link):
                child.copyparams(link[idx], copy_persistent) 
Example #19
Source File: sequential.py    From chainer with MIT License
def flatten(self):
        """Flatten nested :class:`~chainer.Sequential` links.

        This method flattens all the nested :class:`~chainer.Sequential` links
        inside this :class:`~chainer.Sequential` link.

        Returns:

            A flattened :class:`~chainer.Sequential` object.

        .. admonition:: Example

            .. code-block:: python

                >>> import chainer
                >>> import chainer.functions as F
                >>> import chainer.links as L
                >>> a = chainer.Sequential(L.Linear(None, 10), F.relu)
                >>> b = chainer.Sequential(L.Linear(None, 10), F.relu)
                >>> a.append(b)
                >>> print(a)  # Without flatten
                0       Linear  W(None) b(10,)
                1       relu
                2       Sequential      which has 2 layers
                >>> print(a.flatten())  # With flatten
                0       Linear  W(None) b(10,)
                1       relu
                2       Linear  W(None) b(10,)
                3       relu

        """
        ret = Sequential()
        for layer in self:
            if isinstance(layer, Sequential):
                ret.extend(layer.flatten())
            else:
                ret.append(layer)
        return ret 
Example #20
Source File: glance_chainerio.py    From pfio with MIT License
def MLP(n_units, n_out):
    layer = ch.Sequential(L.Linear(n_units), F.relu)
    model = layer.repeat(2)
    model.append(L.Linear(n_out))

    return model 
Example #21
Source File: mask_refine.py    From models with MIT License
def __init__(self):
        super(MaskRefine, self).__init__()
        with self.init_scope():
            self.v0 = chainer.Sequential(
                    Conv2DActiv(64, 16, ksize=3, pad=1),
                    Conv2DActiv(16, 4, ksize=3, pad=1),
            )
            self.v1 = chainer.Sequential(
                    Conv2DActiv(256, 64, ksize=3, pad=1),
                    Conv2DActiv(64, 16, ksize=3, pad=1),
            )
            self.v2 = chainer.Sequential(
                    Conv2DActiv(512, 128, ksize=3, pad=1),
                    Conv2DActiv(128, 32, ksize=3, pad=1),
            )

            self.h2 = chainer.Sequential(
                    Conv2DActiv(32, 32, ksize=3, pad=1),
                    Conv2DActiv(32, 32, ksize=3, pad=1),
            )
            self.h1 = chainer.Sequential(
                    Conv2DActiv(16, 16, ksize=3, pad=1),
                    Conv2DActiv(16, 16, ksize=3, pad=1),
            )
            self.h0 = chainer.Sequential(
                    Conv2DActiv(4, 4, ksize=3, pad=1),
                    Conv2DActiv(4, 4, ksize=3, pad=1),
            )

            self.deconv = L.Deconvolution2D(256, 32, ksize=15, stride=15)
            self.post0 = L.Convolution2D(32, 16, ksize=3, pad=1)
            self.post1 = L.Convolution2D(16, 4, ksize=3, pad=1)
            self.post2 = L.Convolution2D(4, 1, ksize=3, pad=1) 
Example #22
Source File: __init__.py    From models with MIT License
def build_decoder(vocab_size, N=6, model_size=512, ff_size=2048, num_heads=8, dropout_ratio=0.1):
    """
        Convenience function that returns the decoder, together with the embedding layer.
        Code using this function is expected to embed the input to the decoder by itself, using
        the returned embedding Chain.
    :param vocab_size: the number of classes
    :param N: stack size of the decoder
    :param model_size: the number of hidden units in the transformer
    :param ff_size: the number of hidden units in the PositionwiseFeedForward part of the decoder
    :param num_heads: number of attention heads in the attention parts of the model
    :param dropout_ratio: dropout ratio for regularization
    :return: a tuple of two Chains. The first Chain is used for embedding the input to the decoder, the second is
    the decoder itself.
    """
    attention = MultiHeadedAttention(num_heads, model_size, dropout_ratio=dropout_ratio)
    feed_forward = PositionwiseFeedForward(model_size, ff_size, dropout_ratio=dropout_ratio)
    positional_encoding = PositionalEncoding(model_size, dropout_ratio=dropout_ratio)

    decoder_layer = DecoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(attention),
        feed_forward,
        dropout_ratio=dropout_ratio
    )

    decoder = Decoder(decoder_layer, N)

    embeddings = Embedding(model_size, vocab_size)

    return chainer.Sequential(embeddings, positional_encoding), decoder 
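A short sketch of the return contract described in the docstring (the vocabulary size is hypothetical):

# Unpack the tuple: an embedding Chain and the decoder itself.
embed, decoder = build_decoder(vocab_size=1000)
# 'embed' maps token ids to positionally encoded embeddings;
# callers feed its output, together with the encoder output, to 'decoder'.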
Example #23
Source File: test_export_testcase.py    From onnx-chainer with MIT License
def model():
    return chainer.Sequential(
        L.Convolution2D(None, 16, 5, 1, 2),
        F.relu,
        L.Convolution2D(16, 8, 5, 1, 2),
        F.relu,
        L.Convolution2D(8, 5, 5, 1, 2),
        F.relu,
        L.Linear(None, 100),
        L.BatchNormalization(100),
        F.relu,
        L.Linear(100, 10)
    ) 
Example #24
Source File: test_arrays.py    From onnx-chainer with MIT License
def test_output(self, name, slices):
        skip_opsets = None
        if name.startswith('gathernd'):
            skip_opsets = tuple(range(7, 11))
        name = 'get_item_' + name

        model = chainer.Sequential(
            lambda x: F.get_item(x, slices=slices))
        x = input_generator.increasing(2, 3, 4)

        self.expect(
            model, x, name=name, expected_num_initializers=0,
            skip_opset_version=skip_opsets) 
Example #25
Source File: test_arrays.py    From onnx-chainer with MIT License
def test_get_item_error(slices):
    model = chainer.Sequential(
        lambda x: F.get_item(x, slices=slices))
    x = input_generator.increasing(2, 3, 4)

    with pytest.raises(ValueError):
        export(model, x) 
Example #26
Source File: test_arrays.py    From onnx-chainer with MIT License
def test_output(self):
        model = chainer.Sequential(
            F.where
        )
        cond = np.array([[1, 0, 0], [0, 1, 0]], dtype=bool)  # np.bool was removed in NumPy 1.24
        x = input_generator.increasing(2, 3)
        y = np.zeros((2, 3), np.float32)
        self.expect(model, (cond, x, y), skip_opset_version=[7, 8]) 
Example #27
Source File: test_inout.py    From onnx-chainer with MIT License
def test_invalid_customized_input_shape(x_shape, shape_option):
    model = chainer.Sequential(F.relu)

    if isinstance(x_shape, tuple):
        xs = np.zeros(x_shape, dtype=np.float32)
    elif isinstance(x_shape, list):
        xs = tuple(
            np.zeros(shape, dtype=np.float32) for shape in x_shape)
    else:
        assert isinstance(x_shape, dict)
        xs = {k: np.zeros(shape, dtype=np.float32) for
              k, shape in x_shape.items()}

    with pytest.raises(ValueError):
        export(model, xs, input_shapes=shape_option) 
Example #28
Source File: yolo_v3.py    From chainercv with MIT License
def __init__(self, n_fg_class=None, pretrained_model=None):
        super(YOLOv3, self).__init__()

        param, path = utils.prepare_pretrained_model(
            {'n_fg_class': n_fg_class}, pretrained_model, self._models)

        self.n_fg_class = param['n_fg_class']
        self.use_preset('visualize')

        with self.init_scope():
            self.extractor = Darknet53Extractor()
            self.subnet = chainer.ChainList()

        for i, n in enumerate((512, 256, 128)):
            self.subnet.append(chainer.Sequential(
                Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu),
                Convolution2D(
                    len(self._anchors[i]) * (4 + 1 + self.n_fg_class), 1)))

        default_bbox = []
        step = []
        for k, grid in enumerate(self.extractor.grids):
            for v, u in itertools.product(range(grid), repeat=2):
                for h, w in self._anchors[k]:
                    default_bbox.append((v, u, h, w))
                    step.append(self.insize / grid)
        self._default_bbox = np.array(default_bbox, dtype=np.float32)
        self._step = np.array(step, dtype=np.float32)

        if path:
            chainer.serializers.load_npz(path, self, strict=False) 
Example #29
Source File: __init__.py    From kiss with GNU General Public License v3.0
def get_encoder_decoder(src_vocab_size, tgt_vocab_size, N=6, model_size=512, ff_size=2048, num_heads=8, dropout_ratio=0.1):
    attention = MultiHeadedAttention(num_heads, model_size, dropout_ratio=dropout_ratio)
    feed_forward = PositionwiseFeedForward(model_size, ff_size, dropout_ratio=dropout_ratio)
    positional_encoding = PositionalEncoding(model_size, dropout_ratio=dropout_ratio)

    encoder_layer = EncoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(feed_forward),
        dropout_ratio=dropout_ratio
    )
    encoder = Encoder(encoder_layer, N)

    decoder_layer = DecoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(attention),
        feed_forward,
        dropout_ratio=dropout_ratio
    )
    decoder = Decoder(decoder_layer, N)

    src_embeddings = Embedding(model_size, src_vocab_size)
    tgt_embeddings = Embedding(model_size, tgt_vocab_size)

    src_embeddings = chainer.Sequential(src_embeddings, positional_encoding)
    tgt_embeddings = chainer.Sequential(tgt_embeddings, positional_encoding)

    model = EncoderDecoder(
        encoder,
        decoder,
        src_embeddings,
        tgt_embeddings
    )

    return model 
Example #30
Source File: __init__.py    From kiss with GNU General Public License v3.0
def get_conv_feature_encoder_decoder(vocab_size, N=6, model_size=512, ff_size=2048, num_heads=8, dropout_ratio=0.1):
    attention = MultiHeadedAttention(num_heads, model_size, dropout_ratio=dropout_ratio)
    feed_forward = PositionwiseFeedForward(model_size, ff_size, dropout_ratio=dropout_ratio)
    positional_encoding = PositionalEncoding(model_size, dropout_ratio=dropout_ratio)

    encoder_layer = EncoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(feed_forward),
        dropout_ratio=dropout_ratio
    )
    encoder = Encoder(encoder_layer, N)

    decoder_layer = DecoderLayer(
        model_size,
        copy.deepcopy(attention),
        copy.deepcopy(attention),
        feed_forward,
        dropout_ratio=dropout_ratio
    )
    decoder = Decoder(decoder_layer, N)

    embeddings = Embedding(model_size, vocab_size)

    tgt_embeddings = chainer.Sequential(embeddings, positional_encoding)
    src_embeddings = positional_encoding

    model = EncoderDecoder(
        encoder,
        decoder,
        src_embeddings,
        tgt_embeddings
    )

    return model