Python tensorflow.keras.backend.l2_normalize() Examples
The following are 16 code examples of tensorflow.keras.backend.l2_normalize(), drawn from open-source projects. The source file, project, and license are noted above each example.
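In tf.keras, K.l2_normalize(x, axis) wraps tf.nn.l2_normalize: it divides x by its L2 norm along the given axis (with a small epsilon guarding against division by zero), so the result has unit length along that axis. A minimal sketch of the semantics, independent of the examples below:

    import tensorflow as tf
    from tensorflow.keras import backend as K

    x = tf.constant([[3.0, 4.0],
                     [1.0, 0.0]])
    unit = K.l2_normalize(x, axis=-1)  # each row divided by its L2 norm
    print(unit.numpy())                # [[0.6, 0.8], [1.0, 0.0]]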
Example #1
Source File: graphsage_conv.py From spektral with MIT License
def call(self, inputs):
    features = inputs[0]
    fltr = inputs[1]

    # Enforce sparse representation
    if not K.is_sparse(fltr):
        fltr = ops.dense_to_sparse(fltr)

    # Propagation
    indices = fltr.indices
    N = tf.shape(features, out_type=indices.dtype)[0]
    indices = ops.sparse_add_self_loops(indices, N)
    targets, sources = indices[:, -2], indices[:, -1]
    messages = tf.gather(features, sources)
    aggregated = self.aggregate_op(messages, targets, N)
    output = K.concatenate([features, aggregated])
    output = ops.dot(output, self.kernel)

    if self.use_bias:
        output = K.bias_add(output, self.bias)
    # Normalize each node embedding to unit length, as in the GraphSAGE paper.
    output = K.l2_normalize(output, axis=-1)
    if self.activation is not None:
        output = self.activation(output)
    return output
Example #2
Source File: model_triplet.py From image_search_engine with MIT License
def image_model(lr=0.0001):
    input_1 = Input(shape=(None, None, 3))

    base_model = ResNet50(weights='imagenet', include_top=False)
    x1 = base_model(input_1)
    x1 = GlobalMaxPool2D()(x1)

    dense_1 = Dense(vec_dim, activation="linear", name="dense_image_1")
    x1 = dense_1(x1)

    # Project the image embedding onto the unit hypersphere.
    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))
    x1 = _norm(x1)

    model = Model([input_1], x1)
    model.compile(loss="mae", optimizer=Adam(lr))
    model.summary()
    return model
Example #3
Source File: model_triplet.py From image_search_engine with MIT License
def text_model(vocab_size, lr=0.0001):
    input_2 = Input(shape=(None,))

    embed = Embedding(vocab_size, 50, name="embed")
    gru = Bidirectional(GRU(256, return_sequences=True), name="gru_1")
    dense_2 = Dense(vec_dim, activation="linear", name="dense_text_1")

    x2 = embed(input_2)
    x2 = gru(x2)
    x2 = GlobalMaxPool1D()(x2)
    x2 = dense_2(x2)

    # Project the text embedding onto the unit hypersphere.
    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))
    x2 = _norm(x2)

    model = Model([input_2], x2)
    model.compile(loss="mae", optimizer=Adam(lr))
    model.summary()
    return model
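Because both towers above end in the same L2-normalizing Lambda, the cosine similarity between an image embedding and a text embedding reduces to a plain dot product. A hypothetical retrieval step built on these models might look like the following (the array names and the vec_dim value are illustrative, not part of the original project):

    import numpy as np

    vec_dim = 128  # illustrative; the project defines vec_dim elsewhere
    img_vec = np.random.rand(vec_dim).astype("float32")
    img_vec /= np.linalg.norm(img_vec)                           # unit-length image embedding
    txt_vecs = np.random.rand(1000, vec_dim).astype("float32")
    txt_vecs /= np.linalg.norm(txt_vecs, axis=1, keepdims=True)  # unit-length text embeddings
    scores = txt_vecs @ img_vec    # dot product == cosine similarity for unit vectors
    best = int(np.argmax(scores))  # index of the best-matching caption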
Example #4
Source File: keras_layers.py From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    last_state = K.expand_dims(inp_b[:, -1, :], 1)
    m = []
    for i in range(self.output_dim):
        # Perspective i: cosine similarity between every timestep of inp_a
        # and the last state of inp_b, each re-weighted by W[i].
        outp_a = inp_a * self.W[i]
        outp_last = last_state * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_last = K.l2_normalize(outp_last, -1)
        outp = K.batch_dot(outp_a, outp_last, axes=[2, 2])
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
Example #5
Source File: keras_layers.py From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    m = []
    for i in range(self.output_dim):
        outp_a = inp_a * self.W[i]
        outp_b = inp_b * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_b = K.l2_normalize(outp_b, -1)
        outp = K.batch_dot(outp_a, outp_b, axes=[2, 2])
        # Keep only the best-matching timestep of inp_b for each timestep of inp_a.
        outp = K.max(outp, -1, keepdims=True)
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
Example #6
Source File: keras_layers.py From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    outp_a = K.l2_normalize(inp_a, -1)
    outp_b = K.l2_normalize(inp_b, -1)
    alpha = K.batch_dot(outp_b, outp_a, axes=[1, 1])
    alpha = K.l2_normalize(alpha, 1)
    hmean = K.batch_dot(outp_b, alpha, axes=[2, 1])
    kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')
    m = []
    for i in range(self.output_dim):
        outp_a = inp_a * self.W[i]
        outp_hmean = hmean * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_hmean = K.l2_normalize(outp_hmean, -1)
        outp = K.batch_dot(outp_hmean, outp_a, axes=[2, 2])
        outp = K.sum(outp * kcon, -1, keepdims=True)
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
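Examples #4 through #6 all build on the same primitive: l2_normalize followed by batch_dot yields a batched cosine-similarity matrix between two sequences of vectors. A standalone sketch of just that primitive (the shapes are illustrative):

    import tensorflow as tf
    from tensorflow.keras import backend as K

    a = tf.random.normal((2, 5, 8))  # (batch, timesteps_a, features)
    b = tf.random.normal((2, 7, 8))  # (batch, timesteps_b, features)
    cos = K.batch_dot(K.l2_normalize(a, -1), K.l2_normalize(b, -1), axes=[2, 2])
    print(cos.shape)  # (2, 5, 7): cosine similarity for every pair of timesteps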
Example #7
Source File: topk_pool.py From spektral with MIT License
def compute_scores(self, X, A, I):
    # Score each node by projecting its features onto the unit-normalized
    # pooling kernel.
    return K.dot(X, K.l2_normalize(self.kernel))
Example #8
Source File: agnn_conv.py From spektral with MIT License
def call(self, inputs, **kwargs):
    X, A, E = self.get_inputs(inputs)
    # AGNN attends over neighbors using cosine similarity, hence the
    # unit-normalized copy of the features passed to propagate().
    X_norm = K.l2_normalize(X, axis=-1)
    output = self.propagate(X, A, E, X_norm=X_norm)
    output = self.activation(output)
    return output
Example #9
Source File: conv_models.py From bootcamp with Apache License 2.0
def __init__(self, batch_input_shape=(None, NUM_FRAMES, NUM_FBANKS, 1),
             include_softmax=False,
             num_speakers_softmax=None):
    self.include_softmax = include_softmax
    if self.include_softmax:
        assert num_speakers_softmax > 0
    self.clipped_relu_count = 0

    # http://cs231n.github.io/convolutional-networks/
    # conv weights
    # #params = ks * ks * nb_filters * num_channels_input

    # Conv128-s
    # 5*5*128*128/2+128
    # ks*ks*nb_filters*channels/strides+bias(=nb_filters)

    # take 100 ms -> 4 frames.
    # if signal is 3 seconds, then take 100ms per 100ms and average out this network.
    # 8*8 = 64 features.

    # used to share all the layers across the inputs

    # num_frames = K.shape() - do it dynamically after.
    inputs = Input(batch_shape=batch_input_shape, name='input')
    x = self.cnn_component(inputs)

    x = Reshape((-1, 2048))(x)
    # Temporal average layer. axis=1 is time.
    x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)
    if include_softmax:
        logger.info('Including a Dropout layer to reduce overfitting.')
        # used for softmax because the dataset we pre-train on might be too small. easy to overfit.
        x = Dropout(0.5)(x)
    x = Dense(512, name='affine')(x)
    if include_softmax:
        # Those weights are just when we train on softmax.
        x = Dense(num_speakers_softmax, activation='softmax')(x)
    else:
        # Does not contain any weights.
        x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
    self.m = Model(inputs, x, name='ResCNN')
Example #10
Source File: matching.py From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]

    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = self.kernel * K.expand_dims(sent2, 1)
    v2 = K.expand_dims(v2, 1)
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #11
Source File: matching.py From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]

    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.max(K.sum(K.expand_dims(v1, 2) * K.expand_dims(v2, 1), axis=-1),
                     axis=-2)
    return matching
Example #12
Source File: matching.py From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]

    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #13
Source File: matching.py From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, **kwargs):
    sent1 = inputs[0]
    sent2 = inputs[1]

    v1 = K.expand_dims(sent1, -2) * self.kernel
    v2 = K.expand_dims(sent2, -2) * self.kernel
    v1 = K.l2_normalize(v1, axis=-1)
    v2 = K.l2_normalize(v2, axis=-1)
    matching = K.sum(v1 * v2, axis=-1)
    return matching
Example #14
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0
def _euclidian_dist(self, x_pair: List[Tensor]) -> Tensor:
    x1_norm = K.l2_normalize(x_pair[0], axis=1)
    x2_norm = K.l2_normalize(x_pair[1], axis=1)
    diff = x1_norm - x2_norm
    square = K.square(diff)
    _sum = K.sum(square, axis=1)
    # Clip away from zero so the sqrt has a finite gradient.
    _sum = K.clip(_sum, min_value=1e-12, max_value=None)
    dist = K.sqrt(_sum) / 2.
    return dist
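Since x1_norm and x2_norm are unit vectors, ||x1_norm - x2_norm|| = sqrt(2 - 2*cos(x1, x2)), so dividing by 2 maps the distance into [0, 1]. A quick numerical check (standalone, illustrative):

    import numpy as np

    x1 = np.array([1.0, 0.0])           # unit vector
    x2 = np.array([0.0, 1.0])           # orthogonal unit vector, so cos = 0
    print(np.linalg.norm(x1 - x2) / 2)  # sqrt(2 - 2*0) / 2 ≈ 0.7071
    # cos = 1 gives distance 0; cos = -1 gives the maximum distance, 1.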
Example #15
Source File: keras_layers.py From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    outp_a = K.l2_normalize(inp_a, -1)
    outp_b = K.l2_normalize(inp_b, -1)
    alpha = K.batch_dot(outp_b, outp_a, axes=[2, 2])
    alpha = K.l2_normalize(alpha, 1)
    alpha = K.one_hot(K.argmax(alpha, 1), K.int_shape(inp_a)[1])
    hmax = K.batch_dot(alpha, outp_b, axes=[1, 1])
    kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')
    m = []
    for i in range(self.output_dim):
        outp_a = inp_a * self.W[i]
        outp_hmax = hmax * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_hmax = K.l2_normalize(outp_hmax, -1)
        outp = K.batch_dot(outp_hmax, outp_a, axes=[2, 2])
        outp = K.sum(outp * kcon, -1, keepdims=True)
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
Example #16
Source File: model_triplet.py From image_search_engine with MIT License
def model(vocab_size, lr=0.0001):
    input_1 = Input(shape=(None, None, 3))
    input_2 = Input(shape=(None,))
    input_3 = Input(shape=(None,))

    # Image tower
    base_model = ResNet50(weights='imagenet', include_top=False)
    x1 = base_model(input_1)
    x1 = GlobalMaxPool2D()(x1)
    dense_1 = Dense(vec_dim, activation="linear", name="dense_image_1")
    x1 = dense_1(x1)

    # Text tower, shared by both caption inputs (input_2 and input_3)
    embed = Embedding(vocab_size, 50, name="embed")
    gru = Bidirectional(GRU(256, return_sequences=True), name="gru_1")
    dense_2 = Dense(vec_dim, activation="linear", name="dense_text_1")

    x2 = embed(input_2)
    x2 = SpatialDropout1D(0.1)(x2)
    x2 = gru(x2)
    x2 = GlobalMaxPool1D()(x2)
    x2 = dense_2(x2)

    x3 = embed(input_3)
    x3 = SpatialDropout1D(0.1)(x3)
    x3 = gru(x3)
    x3 = GlobalMaxPool1D()(x3)
    x3 = dense_2(x3)

    # Project all three embeddings onto the unit hypersphere before the triplet loss.
    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))
    x1 = _norm(x1)
    x2 = _norm(x2)
    x3 = _norm(x3)

    x = Concatenate(axis=-1)([x1, x2, x3])

    model = Model([input_1, input_2, input_3], x)
    model.compile(loss=triplet_loss, optimizer=Adam(lr))
    model.summary()
    return model
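The triplet_loss referenced above is defined elsewhere in the project and is not shown on this page. Purely as an illustrative assumption, a common cosine-based formulation for this concatenated [image, caption, caption] output might look like:

    from tensorflow.keras import backend as K

    def triplet_loss_sketch(y_true, y_pred, margin=0.4, vec_dim=128):
        # Hypothetical loss, not the project's actual triplet_loss; the margin
        # and vec_dim values are assumptions. y_true is unused, as is common
        # for Keras losses driven entirely by y_pred.
        anchor = y_pred[:, :vec_dim]
        positive = y_pred[:, vec_dim:2 * vec_dim]
        negative = y_pred[:, 2 * vec_dim:]
        pos_sim = K.sum(anchor * positive, axis=-1)  # cosine similarities, since
        neg_sim = K.sum(anchor * negative, axis=-1)  # the embeddings are unit-length
        return K.mean(K.maximum(0.0, neg_sim - pos_sim + margin))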