Python keras_layer_normalization.LayerNormalization() Examples
The following are 8 code examples of keras_layer_normalization.LayerNormalization().
You can go to the original project or source file by following the link above each example.
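For context, here is a minimal end-to-end sketch of the layer in use, assuming the package is installed (e.g. via pip install keras-layer-normalization); the input shape is illustrative and mirrors Example #3 below:

import keras
from keras_layer_normalization import LayerNormalization

# Normalize over the last axis of a (batch, 2, 3) input.
model = keras.models.Sequential()
model.add(LayerNormalization(input_shape=(2, 3)))
model.compile(optimizer='adam', loss='mse')
model.summary()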
Example #1
Source File: xlnet.py, from keras-xlnet (MIT License)
def get_custom_objects() -> dict:
    return {
        'gelu': gelu,
        'EmbeddingRet': EmbeddingRet,
        'EmbeddingSim': EmbeddingSim,
        'CreateMask': CreateMask,
        'RestoreMask': RestoreMask,
        'PositionalEmbedding': PositionalEmbedding,
        'PermutationMask': PermutationMask,
        'MaskEmbedding': MaskEmbedding,
        'RelativeBias': RelativeBias,
        'SegmentBias': SegmentBias,
        'RelativeSegmentEmbedding': RelativeSegmentEmbedding,
        'Memory': Memory,
        'LayerNormalization': LayerNormalization,
        'RelativePartialMultiHeadSelfAttention': Attention,
        'FeedForward': FeedForward,
    }
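A mapping like this is typically passed to Keras deserialization so the custom layers can be reconstructed. A sketch of that use (the checkpoint path is hypothetical):

import keras

model = keras.models.load_model(
    'xlnet.h5',  # hypothetical checkpoint path
    custom_objects=get_custom_objects(),
)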
Example #2
Source File: model.py, from keras-gpt-2 (MIT License)
def _wrap_layer(name, input_layer, build_func, trainable=True):
    """Wrap layers with normalization and residual.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and generates the output tensor.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    normal_layer = LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(input_layer)
    build_output = build_func(normal_layer)
    return keras.layers.Add(name='%s-Add' % name)([input_layer, build_output])
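A sketch of how this wrapper might be applied; the shapes, names, and the Dense block are illustrative stand-ins, not part of keras-gpt-2:

import keras

inputs = keras.layers.Input(shape=(10, 768))
output = _wrap_layer(
    name='FeedForward-1',                # prefix for the generated layer names
    input_layer=inputs,
    build_func=keras.layers.Dense(768),  # any callable mapping tensor -> tensor;
                                         # output width must match the input so the residual Add works
)
model = keras.models.Model(inputs=inputs, outputs=output)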
Example #3
Source File: test_layer_normalization.py, from keras-layer-normalization (MIT License)
def test_save_load_json(self):
    model = keras.models.Sequential()
    model.add(LayerNormalization(input_shape=(2, 3)))
    model.compile(optimizer='adam', loss='mse')
    encoded = model.to_json()
    model = keras.models.model_from_json(
        encoded,
        custom_objects={'LayerNormalization': LayerNormalization},
    )
    model.summary()
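Because LayerNormalization is not a built-in Keras layer, model_from_json (like load_model) must be given the class via custom_objects, as shown here; without it, deserialization fails with an unknown-layer error.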
Example #4
Source File: transformer.py, from keras-transformer (MIT License)
def get_custom_objects():
    return {
        'gelu': gelu,
        'LayerNormalization': LayerNormalization,
        'MultiHeadAttention': MultiHeadAttention,
        'FeedForward': FeedForward,
        'TrigPosEmbedding': TrigPosEmbedding,
        'EmbeddingRet': EmbeddingRet,
        'EmbeddingSim': EmbeddingSim,
    }
Example #5
Source File: transformer.py, from keras-transformer (MIT License)
def _wrap_layer(name, input_layer, build_func, dropout_rate=0.0, trainable=True):
    """Wrap layers with residual, normalization and dropout.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and generates the output tensor.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    build_output = build_func(input_layer)
    if dropout_rate > 0.0:
        dropout_layer = keras.layers.Dropout(
            rate=dropout_rate,
            name='%s-Dropout' % name,
        )(build_output)
    else:
        dropout_layer = build_output
    if isinstance(input_layer, list):
        input_layer = input_layer[0]
    add_layer = keras.layers.Add(name='%s-Add' % name)([input_layer, dropout_layer])
    normal_layer = LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(add_layer)
    return normal_layer
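Note the ordering difference from Example #2: here build_func runs first and normalization is applied after the residual addition (post-norm, as in the original Transformer), whereas the GPT-2 wrapper normalizes the input before build_func (pre-norm).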
Example #6
Source File: transformer_xl.py, from keras-transformer-xl (MIT License)
def get_custom_objects():
    return {
        'AdaptiveEmbedding': AdaptiveEmbedding,
        'AdaptiveSoftmax': AdaptiveSoftmax,
        'Scale': Scale,
        'Memory': Memory,
        'LayerNormalization': LayerNormalization,
        'FeedForward': FeedForward,
        'PositionalEmbedding': PositionalEmbedding,
        'RelativeBias': RelativeBias,
        'RelativePartialMultiHeadSelfAttention': RelativePartialMultiHeadSelfAttention,
    }
Example #7
Source File: test_layer_normalization.py, from keras-layer-normalization (MIT License)
def test_sample(self):
    input_layer = keras.layers.Input(
        shape=(2, 3),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization',
    )(input_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=norm_layer,
    )
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics={},
    )
    model.summary()
    inputs = np.array([[
        [0.2, 0.1, 0.3],
        [0.5, 0.1, 0.1],
    ]])
    predict = model.predict(inputs)
    expected = np.asarray([[
        [0.0, -1.22474487, 1.22474487],
        [1.41421356, -0.707106781, -0.707106781],
    ]])
    self.assertTrue(np.allclose(expected, predict), predict)
    input_layer = keras.layers.Input(
        shape=(10, 256),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization',
        beta_initializer='ones',
    )(input_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=norm_layer,
    )
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics={},
    )
    model.summary()
    inputs = np.zeros((2, 10, 256))
    predict = model.predict(inputs)
    expected = np.ones((2, 10, 256))
    self.assertTrue(np.allclose(expected, predict))
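The expected values above can be reproduced by hand: layer normalization subtracts the mean and divides by the standard deviation along the last axis. A pure-NumPy sketch for the first row (the small epsilon is an assumption standing in for the layer's internal one):

import numpy as np

x = np.array([0.2, 0.1, 0.3])
z = (x - x.mean()) / np.sqrt(x.var() + 1e-12)
print(z)  # -> [ 0.         -1.22474487  1.22474487]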
Example #8
Source File: test_layer_normalization.py, from keras-layer-normalization (MIT License)
def test_fit_zeros(self):
    def _leaky_relu(x):
        return keras.activations.relu(x, alpha=0.01)

    input_layer = keras.layers.Input(
        shape=(2, 3),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization-1',
        trainable=False,
    )(input_layer)
    att_layer = MultiHeadAttention(
        head_num=3,
        activation=_leaky_relu,
        name='Multi-Head-Attentions',
    )(norm_layer)
    dense_layer = keras.layers.Dense(units=3, name='Dense-1')(att_layer)
    norm_layer = LayerNormalization(
        name='Layer-Normalization-2',
        trainable=False,
    )(dense_layer)
    dense_layer = keras.layers.Dense(units=3, name='Dense-2')(norm_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=dense_layer,
    )
    model.compile(
        optimizer=keras.optimizers.Adam(lr=1e-3),
        loss='mse',
        metrics={},
    )
    model.summary()

    def _generator_zeros(batch_size=32):
        while True:
            batch_inputs = np.zeros((batch_size, 2, 3))
            batch_outputs = np.asarray([[[0.0, -0.1, 0.2]] * 2] * batch_size)
            yield batch_inputs, batch_outputs

    model.fit_generator(
        generator=_generator_zeros(),
        steps_per_epoch=1000,
        epochs=10,
        validation_data=_generator_zeros(),
        validation_steps=100,
        callbacks=[
            keras.callbacks.EarlyStopping(monitor='val_loss', patience=5),
        ],
    )
    for inputs, _ in _generator_zeros(batch_size=3):
        predicts = model.predict(inputs)
        expect = np.round(np.asarray([[[0.0, -0.1, 0.2]] * 2] * 3), decimals=1)
        actual = np.round(predicts, decimals=1)
        self.assertTrue(np.allclose(expect, actual), (expect, actual))
        break
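Note that this test uses older Keras APIs: in recent versions, Model.fit accepts generators directly (fit_generator is deprecated) and Adam takes learning_rate rather than lr.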