Python keras.layers.Lambda() Examples
The following are 30 code examples of keras.layers.Lambda(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
Example #1
Source File: BBalpha_dropout.py From Dropout_BBalpha with MIT License | 6 votes |
def GenerateMCSamples(inp, layers, K_mc=20):
    """Apply `layers` to `inp` K_mc times and stack the Monte Carlo samples.

    Returns a tensor of shape (nb_batch, K_mc, nb_classes). With K_mc == 1
    the layers are applied once and the single output is returned unstacked.
    """
    if K_mc == 1:
        return apply_layers(inp, layers)
    output_list = []
    # Fix: `xrange` is Python-2-only; `range` is the equivalent here.
    for _ in range(K_mc):
        output_list += [apply_layers(inp, layers)]  # THIS IS BAD!!! we create new dense layers at every call!!!!

    def pack_out(output_list):
        # output = K.pack(output_list)  # K_mc x nb_batch x nb_classes
        output = K.stack(output_list)  # K_mc x nb_batch x nb_classes
        return K.permute_dimensions(output, (1, 0, 2))  # nb_batch x K_mc x nb_classes

    def pack_shape(s):
        # All K_mc outputs share one shape; expects rank-2 (batch, classes).
        s = s[0]
        assert len(s) == 2
        return (s[0], K_mc, s[1])

    out = Lambda(pack_out, output_shape=pack_shape)(output_list)
    return out

# evaluation for classification tasks
Example #2
Source File: keras_yolo.py From PiCamNN with MIT License | 6 votes |
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    # Tap the backbone at batchnorm 13 for the passthrough connection.
    conv13 = darknet.get_layer('batchnormalization_13').output
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, 3, 3),
        DarknetConv2D_BN_Leaky(1024, 3, 3))(darknet.output)

    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    # Reorganize the higher-resolution conv13 features (2x2 spatial blocks
    # into channels) so they can be concatenated with conv20.
    conv13_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv13)

    # Concat conv13 with conv20.
    # NOTE(review): `merge` is the Keras 1.x functional API; newer Keras
    # uses `concatenate` — confirm the pinned Keras version before changing.
    x = merge([conv13_reshaped, conv20], mode='concat')
    x = DarknetConv2D_BN_Leaky(1024, 3, 3)(x)
    # Final 1x1 conv: one (objectness + 4 box + classes) vector per anchor.
    x = DarknetConv2D(num_anchors * (num_classes + 5), 1, 1)(x)
    return Model(inputs, x)
Example #3
Source File: models.py From SeqGAN with MIT License | 6 votes |
def Highway(x, num_layers=1, activation='relu', name_prefix=''):
    '''
    Layer wrapper function for Highway network

    # Arguments:
        x: tensor, shape = (B, input_size)

    # Optional Arguments:
        num_layers: int, default is 1, the number of Highway network layers
        activation: keras activation, default is 'relu'
        name_prefix: str, default is '', layer name prefix

    # Returns:
        out: tensor, shape = (B, input_size)
    '''
    input_size = K.int_shape(x)[1]
    for layer_idx in range(num_layers):
        # Transform branch and gate branch, both preserving input_size.
        transformed = Dense(input_size,
                            activation=activation,
                            name='{}Highway/FC_{}'.format(name_prefix, layer_idx))(x)
        gate = Dense(input_size,
                     activation='sigmoid',
                     name='{}Highway/Gate_ratio_{}'.format(name_prefix, layer_idx))(x)
        # out = gate * transformed + (1 - gate) * carry(x)
        x = Lambda(lambda args: args[0] * args[2] + args[1] * (1 - args[2]),
                   name='{}Highway/Gate_{}'.format(name_prefix, layer_idx))([transformed, x, gate])
    return x
Example #4
Source File: train.py From YOLO-3D-Box with MIT License | 6 votes |
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    # The model's 'yolo_loss' Lambda layer already outputs the loss value,
    # so the compiled loss just passes y_pred through.
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    # Checkpoint filename encodes epoch and losses; only best weights kept.
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
                                 monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    # Targets are dummy zeros because the real y_true tensors are fed as
    # extra model inputs consumed by the loss layer.
    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training.
Example #5
Source File: siamese.py From DogEmbeddings with MIT License | 6 votes |
def SiameseNetwork(input_shape=(5880,)):
    """Build a siamese model over a shared base network.

    Returns (siamese_model, base_network). The siamese model maps a pair of
    inputs to their euclidean distance and is compiled with contrastive loss.
    """
    shared_net = create_base_network(input_shape)

    left = Input(shape=input_shape)
    right = Input(shape=input_shape)

    # Both branches reuse the same weights.
    encoded_left = shared_net(left)
    encoded_right = shared_net(right)

    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([encoded_left, encoded_right])

    siamese = Model([left, right], distance)
    siamese.compile(loss=contrastive_loss, optimizer=RMSprop(), metrics=[accuracy])
    return siamese, shared_net
Example #6
Source File: common.py From imgclsmob with MIT License | 6 votes |
def channel_shuffle_lambda(channels, groups, **kwargs):
    """
    Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.

    Returns
    -------
    Layer
        Channel shuffle layer.
    """
    # Shuffling only makes sense when channels split evenly into groups.
    assert (channels % groups == 0)

    return nn.Lambda(channel_shuffle, arguments={"groups": groups}, **kwargs)
Example #7
Source File: models.py From tartarus with MIT License | 6 votes |
def get_model_41(params): embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk","rb")) # main sequential model model = Sequential() model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'], input_length=params['sequence_length'], weights=embedding_weights)) #model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim']))) model.add(LSTM(2048)) #model.add(Dropout(params['dropout_prob'][1])) model.add(Dense(output_dim=params["n_out"], init="uniform")) model.add(Activation(params['final_activation'])) logging.debug("Output CNN: %s" % str(model.output_shape)) if params['final_activation'] == 'linear': model.add(Lambda(lambda x :K.l2_normalize(x, axis=1))) return model # CRNN Arch for audio
Example #8
Source File: sdne_utils.py From GEM-Benchmark with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_variational_encoder(node_num, d, n_units, nu1, nu2, activation_fn): K = len(n_units) + 1 # Input x = Input(shape=(node_num,)) # Encoder layers y = [None] * (K + 3) y[0] = x for i in range(K - 1): y[i + 1] = Dense(n_units[i], activation=activation_fn, W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[i]) y[K] = Dense(d, activation=activation_fn, W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1]) y[K + 1] = Dense(d)(y[K - 1]) # y[K + 1] = Dense(d, W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1]) y[K + 2] = Lambda(sampling, output_shape=(d,))([y[K], y[K + 1]]) # Encoder model encoder = Model(input=x, outputs=[y[K], y[K + 1], y[K + 2]]) return encoder
Example #9
Source File: keras_yolo.py From object-detection with MIT License | 6 votes |
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    # Passthrough connection from an earlier, higher-resolution layer.
    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    # Reorganize 2x2 spatial blocks into channels so shapes match conv20.
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    # One (objectness + 4 box coords + classes) vector per anchor per cell.
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
Example #10
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0 | 6 votes |
def crop(dimension, start, end):
    """Return a Lambda layer that slices its input along one dimension.

    Example: to crop tensor x[:, :, 5:10], call crop(2, 5, 10)
    (crop on the third dimension).

    Parameters
    ----------
    dimension : int
        Axis to slice; 0 is the batch axis. Must be non-negative.
    start, end : int or None
        Bounds with standard Python slice semantics.

    Returns
    -------
    Lambda
        Layer computing the slice. Works for any tensor rank, generalizing
        the previous hard-coded dimensions 0-4 (which silently returned
        None for anything larger).
    """
    if dimension < 0:
        raise ValueError("dimension must be non-negative, got %d" % dimension)
    return Lambda(lambda x: slice_along_axis(x, dimension, start, end))


def slice_along_axis(x, dimension, start, end):
    """Slice `x` as x[:, ..., start:end] with the slice at axis `dimension`."""
    index = (slice(None),) * dimension + (slice(start, end),)
    return x[index]
Example #11
Source File: seq2seq_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0 | 6 votes |
def crop(dimension, start, end):
    """Return a Lambda layer that slices its input along one dimension.

    Example: to crop tensor x[:, :, 5:10], call crop(2, 5, 10)
    (crop on the third dimension).

    Parameters
    ----------
    dimension : int
        Axis to slice; 0 is the batch axis. Must be non-negative.
    start, end : int or None
        Bounds with standard Python slice semantics.

    Returns
    -------
    Lambda
        Layer computing the slice. Works for any tensor rank, generalizing
        the previous hard-coded dimensions 0-4 (which silently returned
        None for anything larger).
    """
    if dimension < 0:
        raise ValueError("dimension must be non-negative, got %d" % dimension)
    return Lambda(lambda x: slice_along_axis(x, dimension, start, end))


def slice_along_axis(x, dimension, start, end):
    """Slice `x` as x[:, ..., start:end] with the slice at axis `dimension`."""
    index = (slice(None),) * dimension + (slice(start, end),)
    return x[index]
Example #12
Source File: weather_model.py From Deep_Learning_Weather_Forecasting with Apache License 2.0 | 6 votes |
def crop(dimension, start, end):
    """Return a Lambda layer that slices its input along one dimension.

    Example: to crop tensor x[:, :, 5:10], call crop(2, 5, 10)
    (crop on the third dimension).

    Parameters
    ----------
    dimension : int
        Axis to slice; 0 is the batch axis. Must be non-negative.
    start, end : int or None
        Bounds with standard Python slice semantics.

    Returns
    -------
    Lambda
        Layer computing the slice. Works for any tensor rank, generalizing
        the previous hard-coded dimensions 0-4 (which silently returned
        None for anything larger).
    """
    if dimension < 0:
        raise ValueError("dimension must be non-negative, got %d" % dimension)
    return Lambda(lambda x: slice_along_axis(x, dimension, start, end))


def slice_along_axis(x, dimension, start, end):
    """Slice `x` as x[:, ..., start:end] with the slice at axis `dimension`."""
    index = (slice(None),) * dimension + (slice(start, end),)
    return x[index]
Example #13
Source File: layers_builder.py From PSPNet-Keras-tensorflow with MIT License | 5 votes |
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
    """Build PSPNet."""
    # NOTE(review): the `activation` parameter is never used — the final
    # activation below is hard-coded to 'softmax'. Also `learning_rate` is
    # read from module scope, not passed in; confirm both are intentional.
    print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes" % (
        resnet_layers, input_shape, nb_classes))

    inp = Input((input_shape[0], input_shape[1], 3))
    res = ResNet(inp, layers=resnet_layers)
    psp = build_pyramid_pooling_module(res, input_shape)

    x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4",
               use_bias=False)(psp)
    x = BN(name="conv5_4_bn")(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    # Per-pixel class logits, then upsample back to the input resolution.
    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
    # x = Lambda(Interp, arguments={'shape': (
    #     input_shape[0], input_shape[1])})(x)
    x = Interp([input_shape[0], input_shape[1]])(x)
    x = Activation('softmax')(x)
    model = Model(inputs=inp, outputs=x)

    # Solver
    sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #14
Source File: train.py From keras-yolo3-master with MIT License | 5 votes |
def create_model(input_shape, anchors, num_classes, load_pretrained=False, freeze_body=False, weights_path='model_data/yolo_weights.h5'): K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ num_anchors//3, num_classes+5)) for l in range(3)] model_body = yolo_body(image_input, num_anchors//3, num_classes) print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) if load_pretrained: model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) print('Load weights {}.'.format(weights_path)) if freeze_body: # Do not freeze 3 output layers. num = len(model_body.layers)-7 for i in range(num): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})( [*model_body.output, *y_true]) model = Model([model_body.input, *y_true], model_loss) return model
Example #15
Source File: model.py From EasyPR-python with Apache License 2.0 | 5 votes |
def rpn_graph(feature_map, anchors_per_location, anchor_stride): """Builds the computation graph of Region Proposal Network. feature_map: backbone features [batch, height, width, depth] anchors_per_location: number of anchors per pixel in the feature map anchor_stride: Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2 (every other pixel). Returns: rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax) rpn_probs: [batch, W, W, 2] Anchor classifier probabilities. rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors. """ # TODO: check if stride of 2 causes alignment issues if the featuremap # is not even. # Shared convolutional base of the RPN shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu', strides=anchor_stride, name='rpn_conv_shared')(feature_map) # Anchor Score. [batch, height, width, anchors per location * 2]. x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid', activation='linear', name='rpn_class_raw')(shared) # Reshape to [batch, anchors, 2] rpn_class_logits = KL.Lambda( lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x) # Softmax on last dimension of BG/FG. rpn_probs = KL.Activation("softmax", name="rpn_class_xxx")(rpn_class_logits) # Bounding box refinement. [batch, H, W, anchors per location, depth] # where depth is [x, y, log(w), log(h)] x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid", activation='linear', name='rpn_bbox_pred')(shared) # Reshape to [batch, anchors, 4] rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x) return [rpn_class_logits, rpn_probs, rpn_bbox]
Example #16
Source File: conv2d_r.py From Coloring-greyscale-images with MIT License | 5 votes |
def Conv2D_r(channels, filter_size, strides, features):
    """Spectrally-normalized convolution with reflection padding.

    Reflection-pads `features` by filter_size//2 on each spatial side, then
    applies a 'valid' ConvSN2D — equivalent receptive field to 'same'
    padding but without zero-padding artifacts at the borders.
    """
    half = filter_size // 2
    pad_spec = [[0, 0], [half, half], [half, half], [0, 0]]
    padded = Lambda(lambda t: tf.pad(t, pad_spec, 'REFLECT'))(features)
    return ConvSN2D(channels, filter_size, strides=strides, padding='valid')(padded)
Example #17
Source File: layers_builder.py From PSPNet-Keras-tensorflow with MIT License | 5 votes |
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    """Pyramid-pooling branch: average-pool `prev_layer` at pyramid `level`,
    project to 512 channels with 1x1 conv + BN + ReLU, and resize back to
    `feature_map_shape`.

    Raises
    ------
    ValueError
        If `input_shape` is not (473, 473) or (713, 713) — the only shapes
        with defined pooling parameters.
    """
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60, 2: 30, 3: 20, 6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90, 2: 45, 3: 30, 6: 15}
    else:
        # Previously this printed a message and called exit(1); raising is
        # catchable and keeps library code from killing the host process.
        raise ValueError(
            "Pooling parameters for input shape %s are not defined." % (input_shape,))

    names = [
        "conv5_3_pool" + str(level) + "_conv",
        "conv5_3_pool" + str(level) + "_conv_bn"
    ]
    # Kernel == stride: non-overlapping pooling windows of the mapped size.
    kernel = (kernel_strides_map[level], kernel_strides_map[level])
    strides = (kernel_strides_map[level], kernel_strides_map[level])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    # prev_layer = Lambda(Interp, arguments={
    #     'shape': feature_map_shape})(prev_layer)
    prev_layer = Interp(feature_map_shape)(prev_layer)
    return prev_layer
Example #18
Source File: decode.py From keras-centernet with MIT License | 5 votes |
def CtDetDecode(model, hm_index=3, reg_index=4, wh_index=5, k=100, output_stride=4):
    """Append a CenterNet detection-decoding Lambda to `model`.

    Selects the heatmap / offset / size heads from `model.outputs` by index
    and returns a new Model whose output is the decoded top-k detections.
    """
    def _decode(tensors):
        heatmap, offsets, sizes = tensors
        return _ctdet_decode(heatmap, offsets, sizes, k=k, output_stride=output_stride)

    head_indices = [hm_index, reg_index, wh_index]
    detections = Lambda(_decode)([model.outputs[i] for i in head_indices])
    return Model(model.input, detections)
Example #19
Source File: decode.py From keras-centernet with MIT License | 5 votes |
def HpDetDecode(model, hm_index=6, wh_index=11, kps_index=9, reg_index=10, hm_hp_index=7,
                hp_offset_index=8, k=100, output_stride=4):
    """Append a human-pose decoding Lambda to `model`.

    Selects the six pose-estimation heads from `model.outputs` by index and
    returns a new Model whose output is the decoded top-k poses.
    """
    def _decode(tensors):
        hm, wh, kps, reg, hm_hp, hp_offset = tensors
        return _hpdet_decode(hm, wh, kps, reg, hm_hp, hp_offset,
                             k=k, output_stride=output_stride)

    head_indices = [hm_index, wh_index, kps_index, reg_index, hm_hp_index, hp_offset_index]
    poses = Lambda(_decode)([model.outputs[i] for i in head_indices])
    return Model(model.input, poses)
Example #20
Source File: rct_robot.py From robotreviewer with GNU General Public License v3.0 | 5 votes |
def __init__(self):
    # Keras is imported lazily inside __init__ and promoted to module scope
    # via `global`, so importing this module stays cheap when the robot is
    # never instantiated.
    from keras.preprocessing import sequence
    from keras.models import load_model
    from keras.models import Sequential
    from keras.preprocessing import sequence
    from keras.layers import Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    from keras.layers import Embedding
    from keras.layers import Convolution1D, MaxPooling1D
    from keras import backend as K
    from keras.models import Model
    from keras.regularizers import l2
    global sequence, load_model, Sequential, Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    global Embedding, Convolution1D, MaxPooling1D, K, Model, l2

    # SVM classifier plus an ensemble of CNN models (every .h5 under rct/).
    self.svm_clf = MiniClassifier(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_svm_weights.npz'))
    cnn_weight_files = glob.glob(os.path.join(robotreviewer.DATA_ROOT, 'rct/*.h5'))
    self.cnn_clfs = [load_model(cnn_weight_file) for cnn_weight_file in cnn_weight_files]

    # Separate vectorizers feed the SVM and CNN branches.
    self.svm_vectorizer = HashingVectorizer(binary=False, ngram_range=(1, 1), stop_words='english')
    self.cnn_vectorizer = KerasVectorizer(vocab_map_file=os.path.join(robotreviewer.DATA_ROOT, 'rct/cnn_vocab_map.pck'), stop_words='english')

    # Calibration constants and logistic-regression calibrators for
    # combining the SVM/CNN (and publication-type) scores.
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_model_calibration.json'), 'r') as f:
        self.constants = json.load(f)

    self.calibration_lr = {}
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_ptyp_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn_ptyp'] = pickle.load(f)
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn'] = pickle.load(f)
Example #21
Source File: mnist_train.py From VAE-for-Image-Generation with MIT License | 5 votes |
def sampling(args):
    """Reparameterization trick: z = mean + exp(log_var / 2) * eps.

    `args` is the pair (z_mean, z_log_var); eps is standard-normal noise
    scaled by the module-level `epsilon_std`, with width `latent_dim`.
    """
    mean, log_var = args
    batch = K.shape(mean)[0]
    noise = K.random_normal(shape=(batch, latent_dim),
                            mean=0., stddev=epsilon_std)
    return mean + K.exp(log_var / 2) * noise

# usage: z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
Example #22
Source File: test_keras.py From wtte-rnn with MIT License | 5 votes |
def model_no_masking(discrete_time, init_alpha, max_beta):
    """Build a minimal WTTE-RNN test model (no masking layer).

    A TimeDistributed Dense produces the two Weibull parameters per step;
    wtte.output_lambda maps them to (alpha, beta) with the given init/cap.
    `discrete_time` selects the discrete vs continuous Weibull loss.
    """
    model = Sequential()
    model.add(TimeDistributed(Dense(2), input_shape=(n_timesteps, n_features)))

    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete').loss_function
    else:
        loss = wtte.loss(kind='continuous').loss_function

    model.compile(loss=loss, optimizer=RMSprop(lr=lr))
    return model
Example #23
Source File: inception.py From keras-face-recognition with MIT License | 5 votes |
def _inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Inception-ResNet block: parallel conv branches, 1x1 projection,
    scaled residual add, optional activation.

    block_type selects the branch topology ('Block35', 'Block17', 'Block8');
    `scale` damps the residual before adding it to the shortcut.
    NOTE(review): an unrecognized block_type leaves `branches` undefined and
    fails with NameError below — presumably callers only pass the three
    known types; confirm.
    """
    channel_axis = 3
    if block_idx is None:
        prefix = None
    else:
        prefix = '_'.join((block_type, str(block_idx)))
    name_fmt = partial(_generate_layer_name, prefix=prefix)

    if block_type == 'Block35':
        branch_0 = conv2d_bn(x, 32, 1, name=name_fmt('Conv2d_1x1', 0))
        branch_1 = conv2d_bn(x, 32, 1, name=name_fmt('Conv2d_0a_1x1', 1))
        branch_1 = conv2d_bn(branch_1, 32, 3, name=name_fmt('Conv2d_0b_3x3', 1))
        branch_2 = conv2d_bn(x, 32, 1, name=name_fmt('Conv2d_0a_1x1', 2))
        branch_2 = conv2d_bn(branch_2, 32, 3, name=name_fmt('Conv2d_0b_3x3', 2))
        branch_2 = conv2d_bn(branch_2, 32, 3, name=name_fmt('Conv2d_0c_3x3', 2))
        branches = [branch_0, branch_1, branch_2]
    elif block_type == 'Block17':
        branch_0 = conv2d_bn(x, 128, 1, name=name_fmt('Conv2d_1x1', 0))
        branch_1 = conv2d_bn(x, 128, 1, name=name_fmt('Conv2d_0a_1x1', 1))
        branch_1 = conv2d_bn(branch_1, 128, [1, 7], name=name_fmt('Conv2d_0b_1x7', 1))
        branch_1 = conv2d_bn(branch_1, 128, [7, 1], name=name_fmt('Conv2d_0c_7x1', 1))
        branches = [branch_0, branch_1]
    elif block_type == 'Block8':
        branch_0 = conv2d_bn(x, 192, 1, name=name_fmt('Conv2d_1x1', 0))
        branch_1 = conv2d_bn(x, 192, 1, name=name_fmt('Conv2d_0a_1x1', 1))
        branch_1 = conv2d_bn(branch_1, 192, [1, 3], name=name_fmt('Conv2d_0b_1x3', 1))
        branch_1 = conv2d_bn(branch_1, 192, [3, 1], name=name_fmt('Conv2d_0c_3x1', 1))
        branches = [branch_0, branch_1]

    mixed = Concatenate(axis=channel_axis, name=name_fmt('Concatenate'))(branches)
    # Project back to the input's channel count so the residual add matches.
    up = conv2d_bn(mixed, K.int_shape(x)[channel_axis], 1, activation=None, use_bias=True,
                   name=name_fmt('Conv2d_1x1'))
    # Scale the residual before adding (stabilizes training).
    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)
    x = add([x, up])
    if activation is not None:
        x = Activation(activation, name=name_fmt('Activation'))(x)
    return x
Example #24
Source File: test_metrics.py From seqeval with MIT License | 5 votes |
def test_keras_callback(self):
    """End-to-end check of F1Metrics: a fake model that always emits
    y_pred must yield the same F1 the library computes directly, for
    both 'pre' and 'post' padding."""
    expected_score = f1_score(self.y_true, self.y_pred)
    tokenizer = Tokenizer(lower=False)
    tokenizer.fit_on_texts(self.y_true)
    maxlen = max((len(row) for row in self.y_true))

    def prepare(y, padding):
        # Label sequences -> padded index sequences -> one-hot tensors.
        indexes = tokenizer.texts_to_sequences(y)
        padded = pad_sequences(indexes, maxlen=maxlen,
                               padding=padding, truncating=padding)
        categorical = to_categorical(padded)
        return categorical

    for padding in ('pre', 'post'):
        callback = F1Metrics(id2label=tokenizer.index_word)
        y_true_cat = prepare(self.y_true, padding)
        y_pred_cat = prepare(self.y_pred, padding)

        # Constant-output model: ignores its input and returns y_pred_cat.
        input_shape = (1,)
        layer = Lambda(lambda _: constant(y_pred_cat), input_shape=input_shape)
        fake_model = Sequential(layers=[layer])
        callback.set_model(fake_model)
        X = numpy.zeros((y_true_cat.shape[0], 1))

        # Verify that the callback translates sequences correctly by itself
        y_true_cb, y_pred_cb = callback.predict(X, y_true_cat)
        self.assertEqual(y_pred_cb, self.y_pred)
        self.assertEqual(y_true_cb, self.y_true)

        # Verify that the callback stores the correct number in logs
        fake_model.compile(optimizer='adam', loss='categorical_crossentropy')
        history = fake_model.fit(x=X, batch_size=y_true_cat.shape[0], y=y_true_cat,
                                 validation_data=(X, y_true_cat), callbacks=[callback])
        actual_score = history.history['f1'][0]
        self.assertAlmostEqual(actual_score, expected_score)
Example #25
Source File: pspnet.py From keras-image-segmentation with MIT License | 5 votes |
def interp_block(x, num_filters=512, level=1, input_shape=(512, 512, 3), output_stride=16):
    """Pyramid-pooling branch: average-pool at `level`, 1x1-conv to
    `num_filters`, batch-norm, then resize back to the feature-map size.

    Raises
    ------
    ValueError
        If `output_stride` is not 8 or 16. Previously an unsupported value
        left `scale` undefined and crashed later with NameError.
    """
    # Validate output_stride up front (fix: was a bare if/elif with no else).
    if output_stride == 16:
        scale = 5
    elif output_stride == 8:
        scale = 10
    else:
        raise ValueError("output_stride must be 8 or 16, got %r" % (output_stride,))

    # NOTE(review): '/' yields float dimensions here — presumably the Interp
    # resize tolerates floats; confirm before switching to '//'.
    feature_map_shape = (input_shape[0] / output_stride, input_shape[1] / output_stride)

    # compute dataformat
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    kernel = (level*scale, level*scale)
    strides = (level*scale, level*scale)
    global_feat = AveragePooling2D(kernel, strides=strides,
                                   name='pool_level_%s_%s' % (level, output_stride))(x)
    global_feat = _conv(
        filters=num_filters,
        kernel_size=(1, 1),
        padding='same',
        name='conv_level_%s_%s' % (level, output_stride))(global_feat)
    global_feat = BatchNormalization(axis=bn_axis,
                                     name='bn_level_%s_%s' % (level, output_stride))(global_feat)
    global_feat = Lambda(Interp, arguments={'shape': feature_map_shape})(global_feat)

    return global_feat

# squeeze and excitation function
Example #26
Source File: train.py From BERT with Apache License 2.0 | 5 votes |
def sparse_gather(y_pred, target_indices, task_name):
    """Flatten `y_pred` to (-1, last_dim) and gather the rows addressed by
    `target_indices` (cast to int32). Layer names are prefixed with
    `task_name` so the op can appear once per task in the graph."""
    def _flatten(tensor):
        return K.reshape(tensor, (-1, K.int_shape(tensor)[-1]))

    def _gather(inputs):
        flat, indices = inputs
        return K.gather(flat, K.cast(indices, 'int32'))

    flat_h = Lambda(_flatten, name=task_name + '_flatten')(y_pred)
    return Lambda(_gather, name=task_name + '_gather')([flat_h, target_indices])
Example #27
Source File: BBalpha_dropout.py From Dropout_BBalpha with MIT License | 5 votes |
def pW(p):
    """Return a Lambda layer scaling activations by (1 - p) — the
    deterministic weight-scaling counterpart of dropout with rate p.
    The output shape is unchanged."""
    keep_prob = 1.0 - p
    return Lambda(lambda x: x * keep_prob, output_shape=lambda shape: shape)
Example #28
Source File: BBalpha_dropout.py From Dropout_BBalpha with MIT License | 5 votes |
def Dropout_mc(p):
    """Return a dropout Lambda layer that stays active at test time
    (Monte Carlo dropout), unlike the standard Dropout layer which is
    disabled during inference. Output shape is unchanged."""
    def apply_dropout(x):
        return K.dropout(x, p)

    return Lambda(apply_dropout, output_shape=lambda shape: shape)
Example #29
Source File: keras_yolo.py From object-detection with MIT License | 5 votes |
def space_to_depth_x2_output_shape(input_shape):
    """Determine space_to_depth output shape for block_size=2.

    Halves the spatial dimensions and quadruples the channels; if the
    spatial size is unknown (None/0), the spatial output dims stay None.
    Note: For Lambda with TensorFlow backend, output shape may not be needed.
    """
    if input_shape[1]:
        return (input_shape[0],
                input_shape[1] // 2,
                input_shape[2] // 2,
                4 * input_shape[3])
    return (input_shape[0], None, None, 4 * input_shape[3])
Example #30
Source File: keras_yolo.py From object-detection with MIT License | 5 votes |
def space_to_depth_x2(x):
    """Rearrange 2x2 spatial blocks of `x` into the depth dimension
    (thin wrapper for TensorFlow space_to_depth with block_size=2)."""
    # TensorFlow must be imported inside the function so a Lambda layer
    # wrapping it can be (de)serialized; see
    # https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
    import tensorflow as tf
    return tf.space_to_depth(x, block_size=2)