Python lasagne.init.Uniform() Examples
The following are 7 code examples of lasagne.init.Uniform().
Each example notes its original project, source file, and license. You may also want to check out all available functions and classes of the lasagne.init module.
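Before the project code, here is a minimal sketch of how the initializer itself is used; the shapes and layers below are illustrative and not taken from any of the projects that follow:

import lasagne
from lasagne import init, layers

# Draw a weight matrix directly: Uniform(range=0.05) samples from U(-0.05, 0.05).
# Uniform also accepts a (low, high) tuple for range, or std/mean instead of range.
w = init.Uniform(range=0.05).sample((10, 20))  # numpy array of shape (10, 20)

# More commonly, the initializer is passed to a layer; lasagne calls sample()
# with the correct shape when the layer's parameters are created.
l_in = layers.InputLayer(shape=(None, 10))
l_dense = layers.DenseLayer(l_in, num_units=20, W=init.Uniform(range=0.05))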
Example #1
Source File: layers_theano.py, from visual_dynamics (MIT License)
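In this example, init.Uniform() is the default weight initializer of a grouped 2-D convolution layer: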
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1), pad=0,
             untie_biases=False, groups=1, W=init.Uniform(), b=init.Constant(0.),
             nonlinearity=nl.rectify, flip_filters=True, convolution=T.nnet.conv2d,
             filter_dilation=(1, 1), **kwargs):
    # a grouped convolution requires the filters to divide evenly among the groups
    assert num_filters % groups == 0
    self.groups = groups
    # the base Conv2DLayer samples W (init.Uniform() by default) with the right shape
    super(GroupConv2DLayer, self).__init__(
        incoming, num_filters, filter_size, stride=stride, pad=pad,
        untie_biases=untie_biases, W=W, b=b, nonlinearity=nonlinearity,
        flip_filters=flip_filters, convolution=convolution,
        filter_dilation=filter_dilation, **kwargs)
Example #2
Source File: dist.py, from gelato (MIT License)
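Here init.Uniform(1) supplies the test value for a flat distribution spec: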
def __init__(self):
    super(FlatSpec, self).__init__(testval=init.Uniform(1))
Example #3
Source File: layers_theano.py, from visual_dynamics (MIT License)
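This Caffe-style grouped convolution layer also defaults its weights to init.Uniform():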
def __init__(self, incoming, num_filters, filter_size, group=1, stride=(1, 1),
             pad="valid", untie_biases=False, W=init.Uniform(), b=init.Constant(0.),
             nonlinearity=nl.rectify, convolution=T.nnet.conv2d, **kwargs):
    super(CaffeConv2DLayer, self).__init__(
        incoming, num_filters, filter_size, stride=stride, pad=pad,
        untie_biases=untie_biases, W=W, b=b, nonlinearity=nonlinearity,
        convolution=convolution, **kwargs)
    self.group = group
    assert self.num_filters % self.group == 0
Example #4
Source File: test_layers_theano.py, from visual_dynamics (MIT License)
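This test draws both weights and biases from init.Uniform() and checks that two group-convolution implementations agree: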
def test_group_conv(x_shape, num_filters, groups, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    # reference implementation: W and b are both drawn from init.Uniform()
    l_conv = LT.GroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1,
                                 pad='same', untie_biases=True, groups=groups,
                                 nonlinearity=None, W=init.Uniform(), b=init.Uniform())
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t"
        % (x_shape, num_filters, groups, batch_size))

    # the scan-based implementation reuses the sampled parameters, so outputs must match
    l_scan_conv = LT.ScanGroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1,
                                          pad='same', untie_biases=True, groups=groups,
                                          nonlinearity=None, W=l_conv.W, b=l_conv.b)
    scan_conv_var = L.get_output(l_scan_conv)
    scan_conv_fn = theano.function([X_var], scan_conv_var)
    tic()
    scan_conv = scan_conv_fn(X)
    toc("scan_conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t"
        % (x_shape, num_filters, groups, batch_size))

    assert np.allclose(conv, scan_conv)
Example #5
Source File: test_layers_theano.py, from visual_dynamics (MIT License)
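This test again initializes W and b with init.Uniform(), comparing a bilinear group convolution against an equivalent batchwise sum of per-channel convolutions: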
def test_bilinear_group_conv(x_shape, u_shape, batch_size=2):
    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)
    U = np.random.random((batch_size,) + u_shape).astype(theano.config.floatX)

    # bilinear prediction: a grouped convolution over the outer product of x and u,
    # with both W and b initialized from init.Uniform()
    l_xu_outer = LT.OuterProductLayer([l_x, l_u])
    l_x_diff_pred = LT.GroupConv2DLayer(l_xu_outer, x_shape[0], filter_size=5, stride=1,
                                        pad='same', untie_biases=True, groups=x_shape[0],
                                        nonlinearity=None, W=init.Uniform(), b=init.Uniform())
    X_diff_pred_var = L.get_output(l_x_diff_pred)
    X_diff_pred_fn = theano.function([X_var, U_var], X_diff_pred_var)
    X_diff_pred = X_diff_pred_fn(X, U)

    # equivalent computation: one convolution per control dimension (plus bias),
    # combined by a batchwise sum
    u_dim, = u_shape
    l_x_convs = []
    for i in range(u_dim + 1):
        l_x_conv = LT.GroupConv2DLayer(l_x, x_shape[0], filter_size=5, stride=1,
                                       pad='same', untie_biases=True, groups=x_shape[0],
                                       nonlinearity=None,
                                       W=l_x_diff_pred.W.get_value()[:, i:i+1],
                                       b=l_x_diff_pred.b.get_value() if i == u_dim else None)
        l_x_convs.append(l_x_conv)
    l_x_diff_pred_bw = LT.BatchwiseSumLayer(l_x_convs + [l_u])
    X_diff_pred_bw_var = L.get_output(l_x_diff_pred_bw)
    X_diff_pred_bw_fn = theano.function([X_var, U_var], X_diff_pred_bw_var)
    X_diff_pred_bw = X_diff_pred_bw_fn(X, U)

    assert np.allclose(X_diff_pred, X_diff_pred_bw, atol=1e-7)
Example #6
Source File: net_theano.py, from visual_dynamics (MIT License)
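This network builder passes explicit ranges, init.Uniform(1.0) and init.Uniform(0.1), to the dense layers of an action-conditional encoder: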
def build_action_cond_encoder_net(input_shapes, **kwargs):
    x_shape, u_shape = input_shapes

    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x0 = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    # encoder: three strided convolutions followed by a dense bottleneck
    l_x1 = L.Conv2DLayer(l_x0, 64, filter_size=6, stride=2, pad=0, nonlinearity=nl.rectify, name='x1')
    l_x2 = L.Conv2DLayer(l_x1, 64, filter_size=6, stride=2, pad=2, nonlinearity=nl.rectify, name='x2')
    l_x3 = L.Conv2DLayer(l_x2, 64, filter_size=6, stride=2, pad=2, nonlinearity=nl.rectify, name='x3')
    l_x3_shape = lasagne.layers.get_output_shape(l_x3)
    l_y4 = L.DenseLayer(l_x3, 1024, nonlinearity=nl.rectify, name='y')

    # multiplicative interaction between encoding and action,
    # with Uniform-initialized weights at different scales
    l_y4d = L.DenseLayer(l_y4, 2048, W=init.Uniform(1.0), nonlinearity=None)
    l_ud = L.DenseLayer(l_u, 2048, W=init.Uniform(0.1), nonlinearity=None)
    l_y4d_diff_pred = L.ElemwiseMergeLayer([l_y4d, l_ud], T.mul)
    l_y4_diff_pred = L.DenseLayer(l_y4d_diff_pred, 1024, W=init.Uniform(1.0),
                                  nonlinearity=None, name='y_diff_pred')
    l_y4_next_pred = L.ElemwiseMergeLayer([l_y4, l_y4_diff_pred], T.add, name='y_next_pred')

    # decoder: mirror the encoder with deconvolutions
    l_y3_next_pred = L.DenseLayer(l_y4_next_pred, np.prod(l_x3_shape[1:]), nonlinearity=nl.rectify)
    l_x3_next_pred = L.ReshapeLayer(l_y3_next_pred, ([0],) + l_x3_shape[1:], name='x3_next_pred')
    l_x2_next_pred = LT.Deconv2DLayer(l_x3_next_pred, 64, filter_size=6, stride=2, pad=2,
                                      nonlinearity=nl.rectify, name='x2_next_pred')
    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, 64, filter_size=6, stride=2, pad=2,
                                      nonlinearity=nl.rectify, name='x1_next_pred')
    l_x0_next_pred = LT.Deconv2DLayer(l_x1_next_pred, 3, filter_size=6, stride=2, pad=0,
                                      nonlinearity=None, name='x0_next_pred')

    # squared-error reconstruction loss on the predicted next frame
    loss_fn = lambda X, X_pred: ((X - X_pred) ** 2).mean(axis=0).sum() / 2.
    loss = loss_fn(X_next_var, lasagne.layers.get_output(l_x0_next_pred))

    net_name = 'ActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('x0_next_pred', l_x0_next_pred)])
    return net_name, input_vars, pred_layers, loss
Example #7
Source File: tmp_dnn.py, from kaggle-ndsb (MIT License)
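This cuDNN convolution layer defaults its weight initializer to init.Uniform() and resolves the border_mode/pad options before creating its parameters: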
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1),
             border_mode=None, untie_biases=False, W=init.Uniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             pad=None, flip_filters=False):
    super(Conv2DDNNLayer, self).__init__(input_layer)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size
    if isinstance(strides, int):
        strides = (strides, strides)
    self.strides = strides
    self.untie_biases = untie_biases
    self.flip_filters = flip_filters

    if border_mode is not None and pad is not None:
        raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. "
                           "To avoid ambiguity, please specify only one of them.")
    elif border_mode is None and pad is None:
        # no option specified, default to valid mode
        self.pad = (0, 0)
        self.border_mode = 'valid'
    elif border_mode is not None:
        if border_mode == 'valid':
            self.pad = (0, 0)
            self.border_mode = 'valid'
        elif border_mode == 'full':
            self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
            self.border_mode = 'full'
        elif border_mode == 'same':
            # dnn_conv does not support same, so we just specify padding directly.
            # only works for odd filter size, but the even filter size case is
            # probably not worth supporting.
            self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
            self.border_mode = None
        else:
            raise RuntimeError("Unsupported border_mode for Conv2DDNNLayer: %s" % border_mode)
    else:
        if isinstance(pad, int):
            pad = (pad, pad)
        self.pad = pad

    self.W = self.create_param(W, self.get_W_shape())
    if b is None:
        self.b = None
    elif self.untie_biases:
        output_shape = self.get_output_shape()
        self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
    else:
        self.b = self.create_param(b, (num_filters,))