Python blocks.initialization.Uniform() Examples

The following are 4 code examples of blocks.initialization.Uniform(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module blocks.initialization, or try the search function.
Example #1
Source File: test_initialization.py    From attention-lvcsr with MIT License 6 votes vote down vote up
def test_uniform():
    """Check that ``Uniform`` draws samples with the requested moments.

    Nose-style generator test: each ``yield`` emits one parametrized
    check, and the trailing assertion verifies that specifying both
    ``width`` and ``std`` at construction time is rejected.
    """
    rng = numpy.random.RandomState(1)

    def check_uniform(rng, mean, width, std, shape):
        # Draw a weight matrix and verify shape, dtype and sample moments.
        weights = Uniform(mean=mean, width=width,
                          std=std).generate(rng, shape)
        assert weights.shape == shape
        assert weights.dtype == theano.config.floatX
        assert_allclose(weights.mean(), mean, atol=1e-2)
        if width is not None:
            # Std of U(mean - width/2, mean + width/2) is width / sqrt(12).
            std_ = width / numpy.sqrt(12)
        else:
            std_ = std
        # BUG FIX: assert_allclose takes (actual, desired) in that order
        # (rtol is relative to *desired*); match the mean check above.
        assert_allclose(weights.std(), std_, atol=1e-2)
    yield check_uniform, rng, 0, 0.05, None, (500, 600)
    yield check_uniform, rng, 0, None, 0.001, (600, 500)
    yield check_uniform, rng, 5, None, 0.004, (700, 300)

    # Passing both width and std simultaneously must raise.
    assert_raises(ValueError, Uniform, 0, 1, 1)
Example #2
Source File: test_sequence_generator2.py    From blocks-extras with MIT License 6 votes vote down vote up
def setUp(self):
    """Build a small SoftmaxReadout and precompute its expected merge.

    Two input streams of dims 2 and 3 are merged into 4 token scores.
    ``self.merged`` recomputes that affine merge with plain numpy so
    later tests can compare against the brick's output.
    """
    floatX = theano.config.floatX

    self.readout = SoftmaxReadout(
        input_names=['states1', 'states2'],
        num_tokens=4, input_dims=[2, 3],
        weights_init=Uniform(width=1.0),
        biases_init=Uniform(width=1.0),
        seed=1)
    self.readout.initialize()

    # Input arrays of shapes (2, 1, 2) and (2, 1, 3) respectively.
    self.states1 = numpy.array([[[1., 2.]], [[2., 1.]]], dtype=floatX)
    self.states2 = numpy.array([[[3., 4., 5.]], [[5., 4., 3.]]],
                               dtype=floatX)

    # Reference result: per-stream linear transforms summed, plus the
    # post-merge bias vector.
    w1 = self.readout.merge.children[0].W.get_value()
    w2 = self.readout.merge.children[1].W.get_value()
    bias = self.readout.post_merge.parameters[0].get_value()
    self.merged = self.states1.dot(w1) + self.states2.dot(w2) + bias
Example #3
Source File: model.py    From blocks-char-rnn with MIT License 5 votes vote down vote up
def initialize(to_init):
    """Attach standard initializers to each brick and initialize it.

    Parameters
    ----------
    to_init : iterable
        Bricks to configure; each gets a Uniform(width=0.08) weight
        initializer and zero biases before ``initialize()`` is called.
    """
    for brick in to_init:
        brick.weights_init = initialization.Uniform(width=0.08)
        brick.biases_init = initialization.Constant(0)
        brick.initialize()
Example #4
Source File: def_autoencoder_training.py    From cpae with MIT License 4 votes vote down vote up
def initialize_data_and_model(config, train_phase, layout='dict'):
    """Build the dataset wrapper and the Seq2Seq model from ``config``.

    Parameters
    ----------
    config : dict-like
        Experiment configuration. Keys read here include 'encoder',
        'decoder', 'vocab_path', 'vocab_keys_path', 'data_path',
        'embedding_path', 'freeze_pretrained', 'provide_targets' and the
        Seq2Seq hyperparameters.
    train_phase : bool
        True during training; together with 'freeze_pretrained' /
        'provide_targets' it controls whether pretrained embeddings are
        loaded from disk.
    layout : str
        Data layout forwarded to ``LanguageModellingData``.

    Returns
    -------
    (data, model)
        The ``LanguageModellingData`` instance and the initialized
        ``Seq2Seq`` model.

    Raises
    ------
    ValueError
        If no encoder is used but 'vocab_keys_path' is missing, or if
        both 'provide_targets' and 'freeze_pretrained' are set.
    """
    c = config
    fuel_path = fuel.config.data_path[0]
    vocab_main = None
    vocab_keys = None
    if not c['encoder']:
        if not c['vocab_keys_path']:
            raise ValueError('Error: Should specify vocab_keys_path when no encoder')
        # Reuse fuel_path instead of re-reading fuel.config.data_path[0].
        vocab_keys = Vocabulary(
            os.path.join(fuel_path, c['vocab_keys_path']))

    if c['vocab_path']:
        vocab_main = Vocabulary(
            os.path.join(fuel_path, c['vocab_path']))
    # TODO: change name of class LanguageModellingData... very ill-named.
    data = LanguageModellingData(c['data_path'], layout, vocab=vocab_main)

    # The data object may have built its own vocabulary when none was given.
    vocab_main = data.vocab

    model = Seq2Seq(c['emb_dim'], c['dim'], c['num_input_words'],
                    c['num_output_words'], data.vocab,
                    proximity_coef=c['proximity_coef'],
                    proximity_distance=c['proximity_distance'],
                    encoder=c['encoder'],
                    decoder=c['decoder'],
                    shared_rnn=c['shared_rnn'],
                    translate_layer=c['translate_layer'],
                    word_dropout=c['word_dropout'],
                    tied_in_out=c['tied_in_out'],
                    vocab_keys=vocab_keys,
                    reconstruction_coef=c['reconstruction_coef'],
                    provide_targets=c['provide_targets'],
                    weights_init=Uniform(width=0.1),
                    biases_init=Constant(0.))
    model.initialize()

    if c['embedding_path'] and ((train_phase or c['freeze_pretrained']) or
                                c['provide_targets']):
        if c['provide_targets'] and c['freeze_pretrained']:
            # BUG FIX: the two concatenated string literals lacked a
            # separating space, producing "...freeze_pretrained.In that...".
            raise ValueError("Can't provide_targets and use freeze_pretrained. "
                             "In that case, simply use freeze_pretrained")

        # If encoder embeddings are frozen they are not saved with the
        # model's parameters, so load them explicitly from disk here.
        emb_full_path = os.path.join(fuel_path, c['embedding_path'])
        embedding_matrix = numpy.load(emb_full_path)
        if c['provide_targets']:
            model.set_def_embeddings(embedding_matrix, 'target')
            logger.debug("Pre-trained targets loaded")
        else:
            model.set_def_embeddings(embedding_matrix, 'main')
            logger.debug("Pre-trained encoder embeddings loaded")

    return data, model