Python tensorflow.keras.layers.Permute() Examples
The following are 5 code examples of tensorflow.keras.layers.Permute(). You can go to the original project or source file by following the links above each example. You may also want to check out all other available functions and classes of the module tensorflow.keras.layers.
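Before the project examples, here is a minimal standalone sketch (not from any project below) of what Permute does. The dims pattern is 1-indexed and does not include the batch dimension, so (2, 1) swaps the last two axes:

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Permute

# A (batch, steps, features) tensor becomes (batch, features, steps).
x = tf.constant(np.arange(24, dtype=np.float32).reshape(2, 3, 4))
y = Permute((2, 1))(x)
print(y.shape)  # (2, 4, 3)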
Example #1
Source File: layers.py From attention-mechanisms with MIT License
def call(self, inputs):  # inputs: (B, S, H)
    # assumes: import tensorflow as tf
    #          from tensorflow.keras.layers import Permute, Activation, Flatten
    # Expand weights to include batch size through implicit broadcasting
    W1, W2 = self.W1[None, :, :], self.W2[None, :, :]
    hidden_states_transposed = Permute(dims=(2, 1))(inputs)                      # (B, H, S)
    attention_score = tf.matmul(W1, hidden_states_transposed)                    # (B, size, S)
    attention_score = Activation('tanh')(attention_score)                        # (B, size, S)
    attention_weights = tf.matmul(W2, attention_score)                           # (B, num_hops, S)
    attention_weights = Activation('softmax')(attention_weights)                 # (B, num_hops, S)
    embedding_matrix = tf.matmul(attention_weights, inputs)                      # (B, num_hops, H)
    embedding_matrix_flattened = Flatten()(embedding_matrix)                     # (B, num_hops*H)

    if self.use_penalization:
        attention_weights_transposed = Permute(dims=(2, 1))(attention_weights)   # (B, S, num_hops)
        product = tf.matmul(attention_weights, attention_weights_transposed)     # (B, num_hops, num_hops)
        identity = tf.eye(self.num_hops, batch_shape=(inputs.shape[0],))         # (B, num_hops, num_hops)
        frobenius_norm = tf.sqrt(tf.reduce_sum(tf.square(product - identity)))   # penalization distance
        self.add_loss(self.penalty_coefficient * frobenius_norm)                 # add as auxiliary loss

    if self.model_api == 'functional':
        return embedding_matrix_flattened, attention_weights
    elif self.model_api == 'sequential':
        return embedding_matrix_flattened
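Both Permute(dims=(2, 1)) calls above are plain batch transposes. A minimal standalone check (the tensor names here are illustrative, not from the project):

import tensorflow as tf
from tensorflow.keras.layers import Permute

hidden_states = tf.random.normal((8, 50, 64))      # (B, S, H)
transposed = Permute(dims=(2, 1))(hidden_states)   # (B, H, S)
# Equivalent to tf.transpose with the batch axis pinned in place:
also_transposed = tf.transpose(hidden_states, perm=(0, 2, 1))
print(transposed.shape, also_transposed.shape)     # (8, 64, 50) (8, 64, 50)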
Example #2
Source File: se.py From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio for the bottleneck (squeeze) Dense layer

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    # assumes: from tensorflow.keras import backend as K
    #          from tensorflow.keras.layers import (GlobalAveragePooling2D, Reshape,
    #                                               Dense, Permute, multiply)
    # _tensor_shape is a project helper (roughly K.int_shape)
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
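A minimal usage sketch, assuming the imports noted at the top of the block (the input shape is illustrative):

from tensorflow.keras import Input, Model

inputs = Input(shape=(32, 32, 64))                 # channels_last: (H, W, C)
outputs = squeeze_excite_block(inputs, ratio=16)   # bottleneck of 64 // 16 = 4 units
model = Model(inputs, outputs)
model.summary()                                    # output shape stays (32, 32, 64)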
Example #3
Source File: squeeze_excitation.py From DeepPoseKit with Apache License 2.0
def channel_squeeze_excite_block(input, ratio=0.25):
    # assumes: import numpy as np
    #          from tensorflow.keras import backend as K
    #          from tensorflow.keras import layers
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]  # ._keras_shape does not exist in tf.keras
    cse_shape = (1, 1, filters)

    cse = layers.GlobalAveragePooling2D()(init)
    cse = layers.Reshape(cse_shape)(cse)

    # here ratio is a fraction of the channel count, floored at 1
    ratio_filters = int(np.round(filters * ratio))
    if ratio_filters < 1:
        ratio_filters += 1

    cse = layers.Conv2D(
        ratio_filters,
        (1, 1),
        padding="same",
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)
    cse = layers.BatchNormalization()(cse)
    cse = layers.Conv2D(
        filters,
        (1, 1),
        activation="sigmoid",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)

    if K.image_data_format() == "channels_first":
        cse = layers.Permute((3, 1, 2))(cse)

    cse = layers.Multiply()([init, cse])
    return cse
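The Permute((3, 1, 2)) in the channels_first branch exists because Reshape always builds the excitation as (1, 1, filters); under channels_first it has to be moved back to (filters, 1, 1) before the multiply. A sketch of just that step (shapes are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

cse = tf.zeros((4, 1, 1, 64))           # (B, 1, 1, C), as produced by Reshape
cse = layers.Permute((3, 1, 2))(cse)    # (B, C, 1, 1), broadcastable against
print(cse.shape)                        # a channels_first (B, C, H, W) feature map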
Example #4
Source File: se.py From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio for the bottleneck (squeeze) Dense layer

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    # assumes: from tensorflow.keras import backend as K
    #          from tensorflow.keras.layers import (GlobalAveragePooling2D, Reshape,
    #                                               Dense, Permute, multiply)
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]  # ._keras_shape does not exist in tf.keras
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
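This block is essentially the same as Example #2. Note that here (and in #2) ratio is a divisor, while Example #3 treats it as a fraction, so the defaults are not interchangeable. A quick numeric check (illustrative only):

filters = 64
print(filters // 16)               # 4 bottleneck units (Examples #2 and #4, ratio=16)
print(int(round(filters * 0.25)))  # 16 bottleneck units (Example #3, ratio=0.25)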
Example #5
Source File: core.py From crepe with MIT License
def build_and_load_model(model_capacity):
    """
    Build the CNN model and load the weights

    Parameters
    ----------
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity, which determines the model's
        capacity multiplier: 4 (tiny), 8 (small), 16 (medium), 24 (large),
        or 32 (full). 'full' uses the model size specified in the paper,
        and the others use a reduced number of filters in each convolutional
        layer, resulting in a smaller model that is faster to evaluate at
        the cost of slightly reduced pitch estimation accuracy.

    Returns
    -------
    model : tensorflow.keras.models.Model
        The pre-trained keras model loaded in memory
    """
    from tensorflow.keras.layers import Input, Reshape, Conv2D, BatchNormalization
    from tensorflow.keras.layers import MaxPool2D, Dropout, Permute, Flatten, Dense
    from tensorflow.keras.models import Model

    # `os` is imported and `models` (a per-capacity cache) is defined at module level
    if models[model_capacity] is None:
        capacity_multiplier = {
            'tiny': 4, 'small': 8, 'medium': 16, 'large': 24, 'full': 32
        }[model_capacity]

        layers = [1, 2, 3, 4, 5, 6]
        filters = [n * capacity_multiplier for n in [32, 4, 4, 4, 8, 16]]
        widths = [512, 64, 64, 64, 64, 64]
        strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]

        x = Input(shape=(1024,), name='input', dtype='float32')
        y = Reshape(target_shape=(1024, 1, 1), name='input-reshape')(x)

        for l, f, w, s in zip(layers, filters, widths, strides):
            y = Conv2D(f, (w, 1), strides=s, padding='same',
                       activation='relu', name="conv%d" % l)(y)
            y = BatchNormalization(name="conv%d-BN" % l)(y)
            y = MaxPool2D(pool_size=(2, 1), strides=None, padding='valid',
                          name="conv%d-maxpool" % l)(y)
            y = Dropout(0.25, name="conv%d-dropout" % l)(y)

        y = Permute((2, 1, 3), name="transpose")(y)
        y = Flatten(name="flatten")(y)
        y = Dense(360, activation='sigmoid', name="classifier")(y)

        model = Model(inputs=x, outputs=y)

        package_dir = os.path.dirname(os.path.realpath(__file__))
        filename = "model-{}.h5".format(model_capacity)
        model.load_weights(os.path.join(package_dir, filename))
        model.compile('adam', 'binary_crossentropy')

        models[model_capacity] = model

    return models[model_capacity]
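A minimal usage sketch, assuming crepe's module-level models cache (a dict keyed by capacity name, initialized to None) and the pre-trained model-{capacity}.h5 weight files next to core.py:

models = {'tiny': None, 'small': None, 'medium': None, 'large': None, 'full': None}

model = build_and_load_model('tiny')          # builds, loads weights, caches
model.summary()
assert model is build_and_load_model('tiny')  # second call hits the cache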