Python keras.initializations.get() Examples

The following are 29 code examples of keras.initializations.get(), drawn from open-source projects; each is listed with its original project, source file, and license. You may also want to check out all other available functions and classes of the keras.initializations module.
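In Keras 1.x (where the keras.initializations module lives; it was renamed keras.initializers in Keras 2), initializations.get() resolves a string identifier to the corresponding weight-initialization function, and passes an already-callable identifier through unchanged. A minimal sketch of the typical pattern, assuming a Keras 1.x installation:

from keras import initializations

# Resolve a string identifier to an initialization function.
init = initializations.get('glorot_uniform')

# The returned function builds a backend weight variable for a given shape.
W = init((128, 64), name='W')

This is the pattern nearly every example below follows: a layer accepts an init argument (string or callable), normalizes it with initializations.get() in __init__, and later calls the stored function with a weight shape inside build().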
Example #1
Source File: gaborfitting.py    From agnez with BSD 3-Clause "New" or "Revised" License
def __init__(self, input_dim, output_dim, octave=True):
        super(GaborFit, self).__init__()
        init0 = initializations.get('zero')
        init1 = initializations.get('uniform')
        xydim = int(np.sqrt(output_dim))  # side length of the square grid; output_dim is assumed to be a perfect square
        x, y = np.meshgrid(*(np.linspace(-1, 1, xydim),)*2)
        self.x = theano.shared(x.ravel().astype(floatX))
        self.y = theano.shared(y.ravel().astype(floatX))
        self.x0 = init0((input_dim,))
        self.y0 = init0((input_dim,))
        self.theta = init0((input_dim,))
        self.omega = init1((input_dim,))
        self.input = tensor.matrix()
        if octave:
            self.kappa = 2.5
        else:
            self.kappa = np.pi
        self.params = [self.x0, self.y0, self.theta, self.omega] 
Example #2
Source File: layers.py    From research with BSD 3-Clause "New" or "Revised" License
def __init__(self, output_dim, output_length,
               init='glorot_uniform', inner_init='orthogonal',
               activation='tanh',
               W_regularizer=None, U_regularizer=None, b_regularizer=None,
               dropout_W=0., dropout_U=0., **kwargs):
      self.output_dim = output_dim
      self.output_length = output_length
      self.init = initializations.get(init)
      self.inner_init = initializations.get(inner_init)
      self.activation = activations.get(activation)
      self.W_regularizer = regularizers.get(W_regularizer)
      self.U_regularizer = regularizers.get(U_regularizer)
      self.b_regularizer = regularizers.get(b_regularizer)
      self.dropout_W, self.dropout_U = dropout_W, dropout_U

      if self.dropout_W or self.dropout_U:
          self.uses_learning_phase = True
      super(DreamyRNN, self).__init__(**kwargs) 
Example #3
Source File: rtn.py    From ikelos with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        self.shape_key = shape_key or {}

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        kwargs['consume_less'] = 'gpu'
        super(RTTN, self).__init__(**kwargs)
        
        self.num_actions = 4 
Example #4
Source File: ConvolutionalMaxOverTime.py    From deeplearning4nlp-tutorial with Apache License 2.0
def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(ConvolutionalMaxOverTime, self).__init__(**kwargs) 
Example #5
Source File: attentive_convlstm.py    From sam with MIT License
def __init__(self, nb_filters_in, nb_filters_out, nb_filters_att, nb_rows, nb_cols,
                 init='normal', inner_init='orthogonal', attentive_init='zero',
                 activation='tanh', inner_activation='sigmoid',
                 W_regularizer=None, U_regularizer=None,
                 weights=None, go_backwards=False,
                 **kwargs):
        self.nb_filters_in = nb_filters_in
        self.nb_filters_out = nb_filters_out
        self.nb_filters_att = nb_filters_att
        self.nb_rows = nb_rows
        self.nb_cols = nb_cols
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.attentive_init = initializations.get(attentive_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.initial_weights = weights
        self.go_backwards = go_backwards

        self.W_regularizer = W_regularizer
        self.U_regularizer = U_regularizer
        self.input_spec = [InputSpec(ndim=5)]

        super(AttentiveConvLSTM, self).__init__(**kwargs) 
Example #6
Source File: rhn.py    From deep-models with Apache License 2.0
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L

    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs) 
Example #7
Source File: convolutional.py    From LSTM_Anomaly_Detector with MIT License
def __init__(self, nb_filter, stack_size, filter_length,
                 init='glorot_uniform', activation='linear', weights=None,
                 image_shape=None, border_mode='valid', subsample_length=1):
        super(Convolution1D, self).__init__()

        nb_row = 1
        nb_col = filter_length
        subsample = (1, subsample_length)
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.subsample = subsample
        self.border_mode = border_mode
        self.image_shape = image_shape
        self.nb_filter = nb_filter
        self.stack_size = stack_size

        self.input = T.tensor4()
        self.W_shape = (nb_filter, stack_size, nb_row, nb_col)
        self.W = self.init(self.W_shape)
        self.b = shared_zeros((nb_filter,))

        self.params = [self.W, self.b]

        if weights is not None:
            self.set_weights(weights) 
Example #8
Source File: layers.py    From research with BSD 3-Clause "New" or "Revised" License
def __init__(self, output_dim, output_length, control_dim=2,
               init='glorot_uniform', inner_init='orthogonal',
               activation='tanh',
               W_regularizer=None, U_regularizer=None, b_regularizer=None,
               dropout_W=0., dropout_U=0., **kwargs):
      self.output_dim = output_dim
      self.output_length = output_length
      self.init = initializations.get(init)
      self.inner_init = initializations.get(inner_init)
      self.activation = activations.get(activation)
      self.W_regularizer = regularizers.get(W_regularizer)
      self.U_regularizer = regularizers.get(U_regularizer)
      self.b_regularizer = regularizers.get(b_regularizer)
      self.dropout_W, self.dropout_U = dropout_W, dropout_U
      self.control_dim = control_dim

      if self.dropout_W or self.dropout_U:
          self.uses_learning_phase = True
      super(CondDreamyRNN, self).__init__(**kwargs) 
Example #9
Source File: huffmax.py    From huffmax with GNU General Public License v3.0
def __init__(self, nb_classes, frequency_table=None, mode=0, init='glorot_uniform', weights=None, W_regularizer=None, b_regularizer=None, activity_regularizer=None,
				 W_constraint=None, b_constraint=None,
				 bias=True, verbose=False, **kwargs):
		'''
		# Arguments:
		nb_classes: Number of classes.
		frequency_table: list. Frequency of each class. More frequent classes will have shorter Huffman codes.
		mode: integer. One of [0, 1].
		verbose: boolean. Set to True to see the progress of building the Huffman tree.
		'''
		self.nb_classes = nb_classes
		if frequency_table is None:
			frequency_table = [1] * nb_classes
		self.frequency_table = frequency_table
		self.mode = mode
		self.init = initializations.get(init)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.activity_regularizer = regularizers.get(activity_regularizer)
		self.W_constraint = constraints.get(W_constraint)
		self.b_constraint = constraints.get(b_constraint)
		self.bias = bias
		self.initial_weights = weights
		self.verbose = verbose
		super(Huffmax, self).__init__(**kwargs) 
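Going by the docstring above, a Huffmax layer might be instantiated as follows; the class count and frequency values here are hypothetical, chosen only for illustration:

# Hypothetical usage: 10 output classes with known frequencies.
# More frequent classes (larger counts) receive shorter Huffman codes.
frequency_table = [100, 80, 60, 40, 20, 10, 5, 5, 3, 2]
layer = Huffmax(nb_classes=10, frequency_table=frequency_table, mode=0, verbose=True)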
Example #10
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0
def __init__(self, init='glorot_uniform',
                 U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
                 U_constraint=None, b_start_constraint=None, b_end_constraint=None,
                 weights=None,
                 **kwargs):
        self.supports_masking = True
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
        self.init = initializations.get(init)

        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)

        self.initial_weights = weights

        super(ChainCRF, self).__init__(**kwargs) 
Example #11
Source File: lstm2ntm.py    From NTM-Keras with MIT License
def __init__(self, output_dim, memory_dim=128, memory_size=20,
                 controller_output_dim=100, location_shift_range=1,
                 num_read_head=1, num_write_head=1,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, R_regularizer=None,
                 b_regularizer=None, W_y_regularizer=None,
                 W_xi_regularizer=None, W_r_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(NTM, self).__init__(**kwargs) 
Example #12
Source File: rtn.py    From ikelos with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(DualCurrent, self).__init__(**kwargs) 
Example #13
Source File: recurrent.py    From keras_bn_library with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
		self.output_dim = output_dim
		self.init = initializations.get(init)
		self.inner_init = initializations.get(inner_init)
		self.forget_bias_init = initializations.get(forget_bias_init)
		self.activation = activations.get(activation)
		self.inner_activation = activations.get(inner_activation)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.U_regularizer = regularizers.get(U_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.dropout_W = dropout_W
		self.dropout_U = dropout_U
		self.stateful = False

		if self.dropout_W or self.dropout_U:
			self.uses_learning_phase = True
		super(QRNN, self).__init__(**kwargs) 
Example #14
Source File: recurrent.py    From keras_bn_library with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):

		self.output_dim = output_dim
		self.init = initializations.get(init)
		self.inner_init = initializations.get(inner_init)
		self.forget_bias_init = initializations.get(forget_bias_init)
		self.activation = activations.get(activation)
		self.inner_activation = activations.get(inner_activation)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.U_regularizer = regularizers.get(U_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.dropout_W, self.dropout_U = dropout_W, dropout_U

		if self.dropout_W or self.dropout_U:
			self.uses_learning_phase = True
		super(DecoderVaeLSTM, self).__init__(**kwargs) 
Example #15
Source File: my_layers.py    From Unsupervised-Aspect-Extraction with Apache License 2.0
def __init__(self, W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements a Content Attention mechanism.
        Supports Masking.
        """
        self.supports_masking = True
        self.init = initializations.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs) 
Example #16
Source File: my_layers.py    From Unsupervised-Aspect-Extraction with Apache License 2.0
def __init__(self, input_dim, output_dim,
                 init='uniform', input_length=None,
                 W_regularizer=None, activity_regularizer=None,
                 W_constraint=None,
                 weights=None, dropout=0., **kwargs):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.input_length = input_length
        self.dropout = dropout

        self.W_constraint = constraints.get(W_constraint)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        if 0. < self.dropout < 1.:
            self.uses_learning_phase = True
        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_length,)
        kwargs['input_dtype'] = K.floatx()
        super(WeightedAspectEmb, self).__init__(**kwargs) 
Example #17
Source File: eltwise_product.py    From mlnet with MIT License
def __init__(self, downsampling_factor=10, init='glorot_uniform', activation='linear',
                 weights=None, W_regularizer=None, activity_regularizer=None,
                 W_constraint=None, input_dim=None, **kwargs):

        self.downsampling_factor = downsampling_factor
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)

        self.initial_weights = weights

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)

        self.input_spec = [InputSpec(ndim=4)]
        super(EltWiseProduct, self).__init__(**kwargs) 
Example #18
Source File: FixedEmbedding.py    From deeplearning4nlp-tutorial with Apache License 2.0
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
                 W_regularizer=None, activity_regularizer=None, W_constraint=None,
                 mask_zero=False, weights=None, **kwargs):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.input_length = input_length
        self.mask_zero = mask_zero

        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_dim,)
        super(FixedEmbedding, self).__init__(**kwargs) 
Example #19
Source File: model.py    From hierarchical-attention-networks with BSD 3-Clause "New" or "Revised" License
def __init__(self, init='glorot_uniform', **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
        self.supports_masking = True
        self.init = initializations.get(init) 
Example #20
Source File: layers.py    From asr-study with MIT License
def __init__(self, epsilon=1e-5, weights=None, gain_init='one',
                 bias_init='zero', **kwargs):
        self.epsilon = epsilon
        self.gain_init = initializations.get(gain_init)
        self.bias_init = initializations.get(bias_init)
        self.initial_weights = weights
        self._logger = logging.getLogger('%s.%s' % (__name__,
                                                    self.__class__.__name__))

        super(LayerNormalization, self).__init__(**kwargs) 
Example #21
Source File: layers_utils.py    From asr-study with MIT License
def multiplicative_integration_init(shape, alpha_init='one',
                                    beta1_init='one', beta2_init='one',
                                    name='mi', has_input=True):
    beta1 = initializations.get(beta1_init)(shape, name='%s_beta1' % name)
    if has_input:
        alpha = initializations.get(alpha_init)(shape, name='%s_alpha' % name)
        beta2 = initializations.get(beta2_init)(shape, name='%s_beta2' % name)
        return alpha, beta1, beta2

    return beta1 
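A hedged usage sketch of the helper above (the layer width and name prefix are illustrative): it returns one backend variable per multiplicative-integration gating term, each built by the initializer that initializations.get() resolves from the corresponding *_init name.

# Create the three multiplicative-integration gating vectors for a
# hypothetical layer of width 64, all initialized to ones.
alpha, beta1, beta2 = multiplicative_integration_init(
    (64,), alpha_init='one', beta1_init='one', beta2_init='one', name='rnn_mi')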
Example #22
Source File: layers.py    From asr-study with MIT License
def __init__(self, output_dim, depth=1,
                 init='glorot_uniform', inner_init='orthogonal',
                 bias_init=highway_bias_initializer,
                 activation='tanh', inner_activation='hard_sigmoid',
                 coupling=True, layer_norm=False, ln_gain_init='one',
                 ln_bias_init='zero', mi=False,
                 W_regularizer=None, U_regularizer=None,
                 b_regularizer=None, dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.depth = depth
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.bias_init = initializations.get(bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.coupling = coupling
        self.has_layer_norm = layer_norm
        self.ln_gain_init = initializations.get(ln_gain_init)
        self.ln_bias_init = initializations.get(ln_bias_init)
        self.mi = mi
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        self._logger = logging.getLogger('%s.%s' % (__name__,
                                                    self.__class__.__name__))

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True

        super(RHN, self).__init__(**kwargs)

        if not self.consume_less == "gpu":
            self._logger.warning("Ignoring consume_less=%s. Setting to 'gpu'." % self.consume_less) 
Example #23
Source File: custom_layers.py    From DenseNet-Keras with MIT License
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero', gamma_init='one', **kwargs):
        self.momentum = momentum
        self.axis = axis
        self.beta_init = initializations.get(beta_init)
        self.gamma_init = initializations.get(gamma_init)
        self.initial_weights = weights
        super(Scale, self).__init__(**kwargs) 
Example #24
Source File: densenet_121.py    From keras-FP16-test with Apache License 2.0
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero', gamma_init='one', **kwargs):
        self.momentum = momentum
        self.axis = axis
        self.beta_init = initializations.get(beta_init)
        self.gamma_init = initializations.get(gamma_init)
        self.initial_weights = weights
        super(Scale, self).__init__(**kwargs) 
Example #25
Source File: attention.py    From MusiteDeep with GNU General Public License v2.0
def __init__(self, hidden, init='glorot_uniform', activation='linear',
                 W_regularizer=None, b_regularizer=None, W_constraint=None, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.hidden = hidden
        super(Attention, self).__init__(**kwargs) 
Example #26
Source File: PointerLSTM.py    From pointer-networks with MIT License
def build(self, input_shape):
        super(PointerLSTM, self).build(input_shape)
        self.input_spec = [InputSpec(shape=input_shape)]
        init = initializations.get('orthogonal')
        self.W1 = init((self.hidden_shape, 1))
        self.W2 = init((self.hidden_shape, 1))
        self.vt = init((input_shape[1], 1))
        self.trainable_weights += [self.W1, self.W2, self.vt] 
Example #27
Source File: scale_layer.py    From cnn_finetune with MIT License
def __init__(self, weights=None, axis=-1, momentum=0.9, beta_init='zero', gamma_init='one', **kwargs):
        self.momentum = momentum
        self.axis = axis
        self.beta_init = initializations.get(beta_init)
        self.gamma_init = initializations.get(gamma_init)
        self.initial_weights = weights
        super(Scale, self).__init__(**kwargs) 
Example #28
Source File: textClassifierRNN.py    From textClassifier with Apache License 2.0
def __init__(self, **kwargs):
        self.init = initializations.get('normal')
        #self.input_spec = [InputSpec(ndim=3)]
        super(AttLayer, self).__init__(**kwargs) 
Example #29
Source File: graph.py    From relational-gcn with MIT License
def __init__(self, output_dim, support=1, featureless=False,
                 init='glorot_uniform', activation='linear',
                 weights=None, W_regularizer=None, num_bases=-1,
                 b_regularizer=None, bias=False, dropout=0., **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim  # number of features per node
        self.support = support  # filter support / number of weights
        self.featureless = featureless  # use/ignore input features
        self.dropout = dropout

        assert support >= 1

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.num_bases = num_bases

        # these will be defined during build()
        self.input_dim = None
        self.W = None
        self.W_comp = None
        self.b = None
        self.num_nodes = None

        super(GraphConvolution, self).__init__(**kwargs)
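As with the other layers on this page, a GraphConvolution is configured entirely through its constructor; the hyperparameter values below are hypothetical, chosen only to show the shape of a typical call:

# Hypothetical relational GCN layer: 16 output features per node,
# 3 support matrices (relation types), basis decomposition with 2 bases.
gc = GraphConvolution(output_dim=16, support=3, num_bases=2,
                      activation='relu', dropout=0.5)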