Python blocks.roles.WEIGHT Examples
The following are 10 code examples of blocks.roles.WEIGHT.
You may also want to check out all available functions and classes of the module blocks.roles.
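As the examples below show, the common pattern is: a brick tags each shared parameter with add_role(parameter, WEIGHT) when it allocates it, and downstream code retrieves every weight in a computation graph with VariableFilter(roles=[WEIGHT]). Here is a minimal sketch of that pattern; the shapes and variable names are illustrative, not taken from any of the projects below:

from blocks.roles import WEIGHT, add_role
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.utils import shared_floatx_nans
from theano import tensor

# Allocate a parameter and tag it with the WEIGHT role
W = shared_floatx_nans((10, 5), name='W')
add_role(W, WEIGHT)

# Build a graph that uses the parameter ...
x = tensor.matrix('x')
cg = ComputationGraph([tensor.dot(x, W).sum()])

# ... then pull out every variable carrying the WEIGHT role,
# e.g. to build an L2 penalty as Example #10 does
weights = VariableFilter(roles=[WEIGHT])(cg.variables)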
Example #1
Source File: recurrent.py From attention-lvcsr with MIT License
def _allocate(self):
    self.W_state = shared_floatx_nans((self.dim, 4 * self.dim),
                                      name='W_state')
    self.W_cell_to_in = shared_floatx_nans((self.dim,),
                                           name='W_cell_to_in')
    self.W_cell_to_forget = shared_floatx_nans((self.dim,),
                                               name='W_cell_to_forget')
    self.W_cell_to_out = shared_floatx_nans((self.dim,),
                                            name='W_cell_to_out')
    # The underscore is required to prevent collision with
    # the `initial_state` application method
    self.initial_state_ = shared_floatx_zeros((self.dim,),
                                              name="initial_state")
    self.initial_cells = shared_floatx_zeros((self.dim,),
                                             name="initial_cells")
    add_role(self.W_state, WEIGHT)
    add_role(self.W_cell_to_in, WEIGHT)
    add_role(self.W_cell_to_forget, WEIGHT)
    add_role(self.W_cell_to_out, WEIGHT)
    add_role(self.initial_state_, INITIAL_STATE)
    add_role(self.initial_cells, INITIAL_STATE)
    self.parameters = [
        self.W_state, self.W_cell_to_in, self.W_cell_to_forget,
        self.W_cell_to_out, self.initial_state_, self.initial_cells]
Example #2
Source File: bricks.py From Associative_LSTM with MIT License
def _allocate(self):
    # 4.5 * self.dim is a float; cast to int so the shape tuple
    # is valid (self.dim is even in this model, so no truncation)
    self.W_state = shared_floatx_nans((self.dim, int(4.5 * self.dim)),
                                      name='W_state')
    # The underscore is required to prevent collision with
    # the `initial_state` application method
    self.initial_state_ = shared_floatx_zeros((self.dim,),
                                              name="initial_state")
    self.initial_cells = shared_floatx_zeros((self.num_copies, self.dim),
                                             name="initial_cells")
    add_role(self.W_state, WEIGHT)
    # add_role(self.initial_state_, INITIAL_STATE)
    # add_role(self.initial_cells, INITIAL_STATE)
    self.parameters = [self.W_state]
Example #3
Source File: bricks.py From Associative_LSTM with MIT License
def _allocate(self):
    self.W_state = shared_floatx_nans((self.dim, 4 * self.dim),
                                      name='W_state')
    # The underscore is required to prevent collision with
    # the `initial_state` application method
    self.initial_state_ = shared_floatx_zeros((self.dim,),
                                              name="initial_state")
    self.initial_cells = shared_floatx_zeros((self.dim,),
                                             name="initial_cells")
    add_role(self.W_state, WEIGHT)
    add_role(self.initial_state_, INITIAL_STATE)
    add_role(self.initial_cells, INITIAL_STATE)
    self.parameters = [
        self.W_state, self.initial_state_, self.initial_cells]
Example #4
Source File: recurrent.py From attention-lvcsr with MIT License
def _allocate(self):
    self.parameters.append(shared_floatx_nans((self.dim, self.dim),
                                               name="W"))
    add_role(self.parameters[0], WEIGHT)
    self.parameters.append(shared_floatx_zeros((self.dim,),
                                               name="initial_state"))
    add_role(self.parameters[1], INITIAL_STATE)
Example #5
Source File: recurrent.py From attention-lvcsr with MIT License
def _allocate(self):
    self.parameters.append(shared_floatx_nans((self.dim, self.dim),
                                              name='state_to_state'))
    add_role(self.parameters[-1], WEIGHT)
    self.parameters.append(shared_floatx_nans((self.dim, 2 * self.dim),
                                              name='state_to_gates'))
    add_role(self.parameters[-1], WEIGHT)
    self.parameters.append(shared_floatx_nans((self.dim,),
                                              name="initial_state"))
    add_role(self.parameters[-1], INITIAL_STATE)
Example #6
Source File: simple.py From attention-lvcsr with MIT License
def _allocate(self):
    W = shared_floatx_nans((self.input_dim, self.output_dim), name='W')
    add_role(W, WEIGHT)
    self.parameters.append(W)
    self.add_auxiliary_variable(W.norm(2), name='W_norm')
    if self.use_bias:
        b = shared_floatx_nans((self.output_dim,), name='b')
        add_role(b, BIAS)
        self.parameters.append(b)
        self.add_auxiliary_variable(b.norm(2), name='b_norm')
Example #7
Source File: lookup.py From attention-lvcsr with MIT License
def _allocate(self):
    self.parameters.append(shared_floatx_nans((self.length, self.dim),
                                              name='W'))
    add_role(self.parameters[-1], WEIGHT)
Example #8
Source File: ladder.py From ladder with MIT License
def weight(self, init, name, cast_float32=True, for_conv=False):
    weight = self.shared(init, name, cast_float32, role=WEIGHT)
    if for_conv:
        # Broadcast a 1D weight across batch and spatial dimensions
        return weight.dimshuffle('x', 0, 'x', 'x')
    return weight
Example #9
Source File: model.py From blocks-examples with MIT License
def _allocate(self):
    self.parameters.append(shared_floatx_nans((self.dim, self.dim),
                                              name='state_to_state'))
    self.parameters.append(shared_floatx_nans((self.dim, 2 * self.dim),
                                              name='state_to_gates'))
    for i in range(2):
        if self.parameters[i]:
            add_role(self.parameters[i], WEIGHT)
Example #10
Source File: __init__.py From blocks-examples with MIT License
def main(save_to, num_epochs):
    mlp = MLP([Tanh(), Softmax()], [784, 100, 10],
              weights_init=IsotropicGaussian(0.01),
              biases_init=Constant(0))
    mlp.initialize()
    x = tensor.matrix('features')
    y = tensor.lmatrix('targets')
    probs = mlp.apply(x)
    cost = CategoricalCrossEntropy().apply(y.flatten(), probs)
    error_rate = MisclassificationRate().apply(y.flatten(), probs)

    cg = ComputationGraph([cost])
    W1, W2 = VariableFilter(roles=[WEIGHT])(cg.variables)
    cost = cost + .00005 * (W1 ** 2).sum() + .00005 * (W2 ** 2).sum()
    cost.name = 'final_cost'

    mnist_train = MNIST(("train",))
    mnist_test = MNIST(("test",))

    algorithm = GradientDescent(
        cost=cost, parameters=cg.parameters,
        step_rule=Scale(learning_rate=0.1))
    extensions = [Timing(),
                  FinishAfter(after_n_epochs=num_epochs),
                  DataStreamMonitoring(
                      [cost, error_rate],
                      Flatten(
                          DataStream.default_stream(
                              mnist_test,
                              iteration_scheme=SequentialScheme(
                                  mnist_test.num_examples, 500)),
                          which_sources=('features',)),
                      prefix="test"),
                  TrainingDataMonitoring(
                      [cost, error_rate,
                       aggregation.mean(algorithm.total_gradient_norm)],
                      prefix="train",
                      after_epoch=True),
                  Checkpoint(save_to),
                  Printing()]

    if BLOCKS_EXTRAS_AVAILABLE:
        extensions.append(Plot(
            'MNIST example',
            channels=[
                ['test_final_cost',
                 'test_misclassificationrate_apply_error_rate'],
                ['train_total_gradient_norm']]))

    main_loop = MainLoop(
        algorithm,
        Flatten(
            DataStream.default_stream(
                mnist_train,
                iteration_scheme=SequentialScheme(
                    mnist_train.num_examples, 50)),
            which_sources=('features',)),
        model=Model(cost),
        extensions=extensions)

    main_loop.run()