Python keras.engine.topology.Layer() Examples
The following are 15 code examples of keras.engine.topology.Layer().
You can go to the original project or source file by following the links above each example.
You may also want to check out all of the available functions and classes of the keras.engine.topology module.
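All of the examples below subclass keras.engine.topology.Layer and override some combination of __init__, build, call, and compute_output_shape. As a quick refresher before the examples, here is a minimal, generic sketch of that pattern. The MyScale class and its single weight are illustrative inventions, not taken from any of the projects below, and the sketch assumes a Keras 2.x installation where keras.engine.topology is still available.

from keras.engine.topology import Layer

class MyScale(Layer):
    """Hypothetical layer that learns one scaling weight per input feature."""

    def __init__(self, **kwargs):
        super(MyScale, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create the trainable weight once the input dimensionality is known.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1],),
                                      initializer='ones',
                                      trainable=True)
        super(MyScale, self).build(input_shape)

    def call(self, inputs, mask=None):
        return inputs * self.kernel

    def compute_output_shape(self, input_shape):
        return input_shape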
Example #1
Source File: my_layers.py From Attention-Based-Aspect-Extraction with Apache License 2.0
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
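The __init__ above only stores configuration; the regularizers and constraints it collects are normally handed to add_weight when the layer is built. The build() below is a hypothetical illustration of that hand-off, not the actual build() from my_layers.py, and the W and b shapes are assumptions.

# Hypothetical build(); NOT copied from the Attention-Based-Aspect-Extraction repo.
def build(self, input_shape):
    # input_shape: (batch, steps, features) for a masked sequence input
    self.W = self.add_weight(name='{}_W'.format(self.name),
                             shape=(input_shape[-1], input_shape[-1]),
                             initializer=self.init,
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight(name='{}_b'.format(self.name),
                                 shape=(input_shape[-1],),
                                 initializer='zeros',
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    self.built = True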
Example #2
Source File: my_layers.py From Aspect-level-sentiment with Apache License 2.0
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example #3
Source File: my_layers.py From IMN-E2E-ABSA with Apache License 2.0
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example #4
Source File: my_layers.py From IMN-E2E-ABSA with Apache License 2.0
def __init__(self, use_opinion, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.use_opinion = use_opinion
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Self_attention, self).__init__(**kwargs)
Example #5
Source File: my_layers.py From Unsupervised-Aspect-Extraction with Apache License 2.0
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example #6
Source File: layers.py From keras-utilities with MIT License
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, return_attention=False, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.

    Note: The layer has been tested with Keras 1.x

    Example:
        # 1
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
        # next add a Dense layer (for classification/regression) or whatever...

        # 2 - Get the attention scores
        hidden = LSTM(64, return_sequences=True)(words)
        sentence, word_scores = Attention(return_attention=True)(hidden)
    """
    self.supports_masking = True
    self.return_attention = return_attention
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    super(Attention, self).__init__(**kwargs)
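For context, the Raffel-style attention the docstring cites scores each timestep, softmax-normalizes the scores while respecting any mask, and returns the weighted sum over time. The call() below is a rough sketch of that idea written for illustration, not copied from keras-utilities; the shapes of self.W (features,) and self.b (steps,) are assumptions.

# Hypothetical call() for a Raffel-style attention layer; NOT the keras-utilities code.
def call(self, x, mask=None):
    # x: (samples, steps, features)
    eij = K.squeeze(K.dot(x, K.expand_dims(self.W)), axis=-1)   # (samples, steps)
    if self.bias:
        eij = eij + self.b
    eij = K.tanh(eij)

    a = K.exp(eij)
    if mask is not None:
        a = a * K.cast(mask, K.floatx())                        # zero out padded steps
    a = a / K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

    weighted = x * K.expand_dims(a)                             # (samples, steps, features)
    result = K.sum(weighted, axis=1)                            # (samples, features)

    if self.return_attention:
        return [result, a]
    return result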
Example #7
Source File: motion_VAE2D.py From CNNArt with Apache License 2.0
def createModel(patchSize, dHyper, dParam):
    # input corrupted and non-corrupted image
    x_ref = Input(shape=(1, patchSize[0], patchSize[1]))
    x_art = Input(shape=(1, patchSize[0], patchSize[1]))

    encoded_ref, conv_1_ref = encode(x_ref, patchSize)
    encoded_art, conv_1_art = encode(x_art, patchSize)

    # concatenate the encoded features together
    conv_1 = concatenate([conv_1_ref, conv_1_art], axis=0)
    conv_2 = concatenate([encoded_ref, encoded_art], axis=0)

    # create the shared encoder
    z, z_mean, z_log_var, conv_3, conv_4 = encode_shared(conv_2, patchSize)

    # create the decoder
    decoded = decode(z, patchSize, conv_1, conv_2, conv_3, conv_4, dHyper['arch'])

    # separate the concatenated images
    decoded_ref2ref = Lambda(lambda input: input[:input.shape[0]//2, :, :, :],
                             output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    decoded_art2ref = Lambda(lambda input: input[input.shape[0]//2:, :, :, :],
                             output_shape=(1, patchSize[0], patchSize[1]))(decoded)

    # input to CustomLoss Layer
    [decoded_ref2ref, decoded_art2ref] = CustomLossLayer(dHyper, patchSize, dParam)(
        [x_ref, decoded_ref2ref, decoded_art2ref, z_log_var, z_mean])

    # generate the VAE and encoder model
    vae = Model([x_ref, x_art], [decoded_ref2ref, decoded_art2ref])

    return vae
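The two Lambda layers above undo the earlier concatenate(..., axis=0): the decoder output holds the reference reconstructions in the first half of the batch and the artefact-to-reference reconstructions in the second half, and each Lambda slices one half back out. A tiny self-contained sketch of that batch-splitting idea, using a dummy tensor rather than CNNArt's encode/decode helpers:

import numpy as np
import keras.backend as K

batch = K.constant(np.arange(4 * 3, dtype='float32').reshape(4, 3))  # 4 stacked "images"
first_half = batch[:K.shape(batch)[0] // 2]    # rows 0-1, e.g. ref reconstructions
second_half = batch[K.shape(batch)[0] // 2:]   # rows 2-3, e.g. art2ref reconstructions
print(K.eval(first_half))
print(K.eval(second_half))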
Example #8
Source File: motion_VAEGAN2D.py From CNNArt with Apache License 2.0
def build_vae(patchSize, dHyper):
    # input corrupted and non-corrupted image
    x_ref = Input(shape=(1, patchSize[0], patchSize[1]))
    x_art = Input(shape=(1, patchSize[0], patchSize[1]))

    # create respective encoders
    encoded_ref = encode(x_ref, patchSize)
    encoded_art = encode(x_art, patchSize)

    # concatenate the encoded features together
    combined = concatenate([encoded_ref, encoded_art], axis=0)

    # create the shared encoder
    z, z_mean, z_log_var = encode_shared(combined, patchSize)

    # create the decoder
    decoded = decode(z, patchSize, dHyper['dropout'])

    # separate the concatenated images
    decoded_ref2ref = Lambda(lambda input: input[:input.shape[0]//2, :, :, :],
                             output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    decoded_art2ref = Lambda(lambda input: input[input.shape[0]//2:, :, :, :],
                             output_shape=(1, patchSize[0], patchSize[1]))(decoded)

    # input to CustomLoss Layer
    [decoded_ref2ref, decoded_art2ref] = CustomLossLayer(dHyper, patchSize)(
        [x_ref, decoded_ref2ref, decoded_art2ref, z_log_var, z_mean])

    # generate the VAE and encoder model
    vae = Model([x_ref, x_art], [decoded_ref2ref, decoded_art2ref])

    return vae
Example #9
Source File: submission_v40.py From Quora with MIT License
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: (samples, steps, features).
    # Output shape
        2D tensor with shape: (samples, features).
    :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.  # noqa
        The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
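Unlike Example #6, this variant takes the sequence length explicitly as step_dim. A hedged usage sketch follows: the vocabulary size, embedding size, padded length, and final Dense layer are placeholder values, and it assumes the Attention class's remaining methods (build/call) are defined in the same Quora source file as the __init__ shown above.

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

maxlen = 70          # assumed padded question length
model = Sequential()
model.add(Embedding(input_dim=50000, output_dim=300, input_length=maxlen))
model.add(LSTM(64, return_sequences=True))
model.add(Attention(maxlen))          # step_dim must match the padded length
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')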
Example #10
Source File: insample.py From Quora with MIT License
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: (samples, steps, features).
    # Output shape
        2D tensor with shape: (samples, features).
    :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.  # noqa
        The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
Example #11
Source File: submission_v50.py From Quora with MIT License
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: (samples, steps, features).
    # Output shape
        2D tensor with shape: (samples, features).
    :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.  # noqa
        The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
Example #12
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def build(self, input_shape):
    super(AttentionLSTM, self).build(input_shape)

    if hasattr(self.attention_vec, '_keras_shape'):
        attention_dim = self.attention_vec._keras_shape[1]
    else:
        raise Exception('Layer could not be built: no information about expected input shape.')

    self.U_a = self.inner_init((self.output_dim, self.output_dim), name='{}_U_a'.format(self.name))
    self.b_a = K.zeros((self.output_dim,), name='{}_b_a'.format(self.name))

    self.U_m = self.inner_init((attention_dim, self.output_dim), name='{}_U_m'.format(self.name))
    self.b_m = K.zeros((self.output_dim,), name='{}_b_m'.format(self.name))

    if self.single_attention_param:
        self.U_s = self.inner_init((self.output_dim, 1), name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
    else:
        self.U_s = self.inner_init((self.output_dim, self.output_dim), name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((self.output_dim,), name='{}_b_s'.format(self.name))

    self.trainable_weights += [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
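The weights created here augment a standard LSTM with an attention readout over an external tensor (self.attention_vec). A hedged wiring sketch follows; it assumes the class is constructed like a stock Keras 1.x LSTM plus an attention_vec keyword, since the actual constructor is not shown in this excerpt, and the input shapes are made up.

# Hypothetical usage; the AttentionLSTM constructor signature is an assumption.
from keras.layers import Input, LSTM

question = Input(shape=(30, 100))     # (timesteps, features) of the question
passage = Input(shape=(300, 100))     # (timesteps, features) of the passage

question_vec = LSTM(128)(question)                              # vector to attend over
answer_vec = AttentionLSTM(128, attention_vec=question_vec)(passage)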
Example #13
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def build(self, input_shape):
    assert len(input_shape) >= 3
    self.input_spec = [InputSpec(shape=input_shape)]

    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True

    super(AttentionLSTMWrapper, self).build()

    if hasattr(self.attention_vec, '_keras_shape'):
        attention_dim = self.attention_vec._keras_shape[1]
    else:
        raise Exception('Layer could not be built: no information about expected input shape.')

    self.U_a = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim), name='{}_U_a'.format(self.name))
    self.b_a = K.zeros((self.layer.output_dim,), name='{}_b_a'.format(self.name))

    self.U_m = self.layer.inner_init((attention_dim, self.layer.output_dim), name='{}_U_m'.format(self.name))
    self.b_m = K.zeros((self.layer.output_dim,), name='{}_b_m'.format(self.name))

    if self.single_attention_param:
        self.U_s = self.layer.inner_init((self.layer.output_dim, 1), name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
    else:
        self.U_s = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim), name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((self.layer.output_dim,), name='{}_b_s'.format(self.name))

    self.trainable_weights = [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]
Example #14
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, a complete '
                         'input_shape must be provided '
                         '(including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
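reset_states only makes sense for stateful layers, where the batch size is fixed so the state tensors can be preallocated. Below is a generic sketch of the calling pattern with a stock Keras stateful LSTM; the dummy data and sizes are made up and it is not taken from the SQuAD project.

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

x_train = np.random.rand(160, 10, 8).astype('float32')
y_train = np.random.rand(160, 1).astype('float32')

model = Sequential()
# batch_input_shape fixes the batch size, which stateful layers require
model.add(LSTM(32, stateful=True, batch_input_shape=(16, 10, 8)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')

for epoch in range(3):
    model.fit(x_train, y_train, batch_size=16, epochs=1, shuffle=False)
    model.reset_states()   # clear the carried-over hidden state between passes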
Example #15
Source File: layers.py From Keras-progressive_growing_of_gans with MIT License
def compute_output_shape(self, input_shape):
    return input_shape

#----------------------------------------------------------------------------
# Layer normalization. Custom reimplementation based on the paper:
# https://arxiv.org/abs/1607.06450
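The layer normalization referenced in that comment normalizes each sample across its feature axis and then applies a learned gain and bias. The helper below is a back-of-the-envelope backend sketch of that formula, not the progressive-GAN repository's actual implementation; in a real layer, gain and bias would be trainable weights created in build() with ones/zeros initializers.

import keras.backend as K

def layer_norm(x, gain, bias, epsilon=1e-5):
    # Normalize each sample over its last (feature) axis, then rescale and shift.
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.std(x, axis=-1, keepdims=True)
    return gain * (x - mean) / (std + epsilon) + bias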