Python keras.layers.add() Examples
The following are 30 code examples of keras.layers.add(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the keras.layers module.
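Before diving in, here is a minimal, self-contained sketch (not taken from any of the projects below) of what keras.layers.add() does: it sums a list of same-shaped tensors element-wise and returns a single tensor, which is the operation behind every residual connection in the examples that follow.

from keras.layers import Input, Dense, add
from keras.models import Model

# Two branches with identical shapes; add() merges them by element-wise sum.
a = Input(shape=(16,))
b = Input(shape=(16,))
x = add([a, b])
out = Dense(1)(x)

model = Model(inputs=[a, b], outputs=out)
model.summary()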
Example #1
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            dilation_rate=(2, 2),
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #2
Source File: idenprof.py From IdenProf with MIT License
def resnet_first_block_first_module(input, channel_depth):
    residual_input = input
    stride = 1

    residual_input = Conv2D(channel_depth, kernel_size=1, strides=1, padding="same",
                            kernel_initializer="he_normal")(residual_input)
    residual_input = BatchNormalization()(residual_input)

    input = Conv2D(int(channel_depth / 4), kernel_size=1, strides=stride, padding="same",
                   kernel_initializer="he_normal")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(int(channel_depth / 4), kernel_size=3, strides=stride, padding="same",
                   kernel_initializer="he_normal")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(channel_depth, kernel_size=1, strides=stride, padding="same",
                   kernel_initializer="he_normal")(input)
    input = BatchNormalization()(input)

    input = add([input, residual_input])
    input = Activation("relu")(input)

    return input
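A hypothetical usage sketch, not part of the IdenProf source: assuming the Keras imports the block relies on (Conv2D, BatchNormalization, Activation, add) are in scope, the module can be wired into a model like this. The 1x1 shortcut convolution projects the input to channel_depth channels, which is what makes the add() shapes line up.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(56, 56, 64))
out = resnet_first_block_first_module(inp, channel_depth=256)  # output: 56 x 56 x 256
model = Model(inp, out)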
Example #3
Source File: octave_resnet.py From keras-octconv with MIT License
def _octresnet_final_bottleneck_block(ip, filters, alpha=0.5, strides=(1, 1),
                                      downsample_shortcut=False, expansion=4):
    x_high_res, x_low_res = ip

    x_high, x_low = oct_conv_bn_relu(x_high_res, x_low_res, filters,
                                     kernel_size=(1, 1), alpha=alpha)

    x_high, x_low = oct_conv_bn_relu(x_high, x_low, filters,
                                     kernel_size=(3, 3), strides=strides, alpha=alpha)

    final_filters = int(filters * expansion)
    x_high = final_oct_conv_bn_relu(x_high, x_low, final_filters,
                                    kernel_size=(1, 1), activation=False)

    if downsample_shortcut:
        x_high_res = final_oct_conv_bn_relu(x_high_res, x_low_res, final_filters,
                                            kernel_size=(1, 1), strides=strides,
                                            activation=False)

    x = add([x_high, x_high_res])
    x = ReLU()(x)

    return x
Example #4
Source File: octave_resnet.py From keras-octconv with MIT License
def _bottleneck_original(ip, filters, strides=(1, 1), downsample_shortcut=False, expansion=4):
    final_filters = int(filters * expansion)
    shortcut = ip

    x = _conv_bn_relu(ip, filters, kernel_size=(1, 1))
    x = _conv_bn_relu(x, filters, kernel_size=(3, 3), strides=strides)
    x = _conv_bn_relu(x, final_filters, kernel_size=(1, 1), activation=False)

    if downsample_shortcut:
        shortcut = _conv_block(shortcut, final_filters, kernel_size=(1, 1), strides=strides)

    x = add([x, shortcut])
    x = ReLU()(x)

    return x
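A hedged usage sketch for the non-octave bottleneck, assuming the _conv_bn_relu and _conv_block helpers from keras-octconv are in scope: with downsample_shortcut=True the shortcut is projected to final_filters channels, so add() sees matching shapes.

from keras.layers import Input
from keras.models import Model

ip = Input(shape=(32, 32, 64))
x = _bottleneck_original(ip, filters=64, downsample_shortcut=True)  # 64 * 4 = 256 output channels
model = Model(ip, x)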
Example #5
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_inner_product(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)

    # Define a model
    model = Sequential()
    model.add(Dense(2, input_shape=(2,)))

    # Test all zeros
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, mode="zeros", model_precision=model_precision)

    # Test all ones
    model.set_weights([np.ones(w.shape) for w in model.get_weights()])
    self._test_model(model, mode="ones", model_precision=model_precision)

    # Test random
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
    self._test_model(model, model_precision=model_precision)
Example #6
Source File: se.py From keras-squeeze-excite-network with MIT License
def channel_spatial_squeeze_excite(input, ratio=16):
    ''' Create a concurrent channel & spatial squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio of the channel squeeze-excite block

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    '''
    cse = squeeze_excite_block(input, ratio)
    sse = spatial_squeeze_excite_block(input)

    x = add([cse, sse])
    return x
Example #7
Source File: se.py From keras-squeeze-excite-network with MIT License
def channel_spatial_squeeze_excite(input_tensor, ratio=16):
    """ Create a concurrent channel & spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio of the channel squeeze-excite block

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    cse = squeeze_excite_block(input_tensor, ratio)
    sse = spatial_squeeze_excite_block(input_tensor)

    x = add([cse, sse])
    return x
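A usage sketch (an illustrative assumption, not from the repository): provided squeeze_excite_block and spatial_squeeze_excite_block from keras-squeeze-excite-network are importable, the combined block is a shape-preserving transform that can be dropped after any convolutional feature map.

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(64, 64, 32))
feat = Conv2D(32, (3, 3), padding='same')(inp)
out = channel_spatial_squeeze_excite(feat, ratio=16)  # same shape as feat
model = Model(inp, out)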
Example #8
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_conv_ones(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )

    # Set some random weights
    model.set_weights([np.ones(w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #9
Source File: bigan.py From Keras-BiGAN with MIT License
def d_block(inp, fil, p=True):
    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(inp)

    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(inp)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    if p:
        out = AveragePooling2D()(out)

    return out
Example #10
Source File: bigan.py From Keras-BiGAN with MIT License
def g_block(inp, fil, u=True):
    if u:
        out = UpSampling2D(interpolation='bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out
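A hypothetical composition, not in the Keras-BiGAN source as written: with u=True each g_block doubles the spatial size via bilinear upsampling, so a 4x4 latent grid grows to 32x32 after three blocks.

from keras.layers import Input
from keras.models import Model

z = Input(shape=(4, 4, 256))
x = g_block(z, 128)  # -> 8 x 8
x = g_block(x, 64)   # -> 16 x 16
x = g_block(x, 32)   # -> 32 x 32
generator = Model(z, x)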
Example #11
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)
    input_dim = 10
    input_shape = (input_dim, input_dim, 1)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #12
Source File: model.py From segmentation_training_pipeline with MIT License
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN.
    Implements right "same" padding for even kernel sizes.

    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        epsilon: epsilon to use in BN layer
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride),
                        dilation_rate=(rate, rate), padding=depth_padding,
                        use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
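A sketch of calling SepConv_BN, under the assumption that the layers it uses (ZeroPadding2D, DepthwiseConv2D, BatchNormalization, Conv2D, Activation) are imported as in the project; with stride=2 the function takes the manual-padding branch and runs the depthwise convolution with 'valid' padding.

from keras.layers import Input
from keras.models import Model

x_in = Input(shape=(65, 65, 128))
y = SepConv_BN(x_in, filters=256, prefix='block1_sepconv1', stride=2)
model = Model(x_in, y)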
Example #13
Source File: model.py From segmentation_training_pipeline with MIT License
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same',
               use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #     return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example #14
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_inner_product_random(self, model_precision=_MLMODEL_FULL_PRECISION):
    np.random.seed(1988)

    # Define a model
    model = Sequential()
    model.add(Dense(1000, input_shape=(100,)))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model, model_precision=model_precision)
Example #15
Source File: gc_resnet.py From keras-global-context-networks with MIT License
def _resnet_bottleneck_block(input, filters, k=1, strides=(1, 1)):
    ''' Adds a pre-activation resnet block with bottleneck layers

    Args:
        input: input tensor
        filters: number of output filters
        k: width factor
        strides: strides of the convolution layer

    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    bottleneck_expand = 4

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)

    if strides != (1, 1) or init._keras_shape[channel_axis] != bottleneck_expand * filters * k:
        init = Conv2D(bottleneck_expand * filters * k, (1, 1), padding='same',
                      kernel_initializer='he_normal', use_bias=False, strides=strides)(x)

    x = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False, strides=strides)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(bottleneck_expand * filters * k, (1, 1), padding='same',
               kernel_initializer='he_normal', use_bias=False)(x)

    # global context block
    x = global_context_block(x)

    m = add([x, init])
    return m
Example #16
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_dense_softmax(self):
    np.random.seed(1988)

    # Define a model
    model = Sequential()
    model.add(Dense(32, input_shape=(32,), activation="softmax"))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model)
Example #17
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_dense_elu(self):
    np.random.seed(1988)

    # Define a model
    model = Sequential()
    model.add(Dense(32, input_shape=(32,), activation="elu"))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model)
Example #18
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_dense_selu(self):
    np.random.seed(1988)

    # Define a model
    model = Sequential()
    model.add(Dense(32, input_shape=(32,), activation="selu"))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(model)
Example #19
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_conv_random_input_shape_dict(
    self, model_precision=_MLMODEL_FULL_PRECISION
):
    np.random.seed(1988)
    H, W, C = 10, 20, 5
    input_shape = (None, H, W, C)
    num_kernels, kernel_height, kernel_width = 3, 5, 5

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=(None, None, C),
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Test the keras model
    self._test_model(
        model,
        input_name_shape_dict={"data": input_shape},
        model_precision=model_precision,
    )
Example #20
Source File: model.py From CycleGAN-Keras with GNU General Public License v3.0
def Rk(self, x0):
    k = int(x0.shape[-1])

    # first layer
    x = ReflectionPadding2D((1, 1))(x0)
    x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid')(x)
    x = self.normalization(axis=3, center=True, epsilon=1e-5)(x, training=True)
    x = Activation('relu')(x)

    # second layer
    x = ReflectionPadding2D((1, 1))(x)
    x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid')(x)
    x = self.normalization(axis=3, center=True, epsilon=1e-5)(x, training=True)

    # merge
    x = add([x, x0])
    return x
Example #21
Source File: model.py From segmentation_training_pipeline with MIT License
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
                    rate=1, depth_activation=False, return_skip=False):
    """ Basic building block of modified Xception network

    Args:
        inputs: input tensor
        depth_list: number of filters in each SepConv layer. len(depth_list) == 3
        prefix: prefix before name
        skip_connection_type: one of {'conv', 'sum', 'none'}
        stride: stride at last depthwise conv
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        return_skip: flag to return additional tensor after 2 SepConvs for decoder
    """
    residual = inputs
    for i in range(3):
        residual = SepConv_BN(residual,
                              depth_list[i],
                              prefix + '_separable_conv{}'.format(i + 1),
                              stride=stride if i == 2 else 1,
                              rate=rate,
                              depth_activation=depth_activation)
        if i == 1:
            skip = residual
    if skip_connection_type == 'conv':
        shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
                                kernel_size=1, stride=stride)
        shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
        outputs = layers.add([residual, shortcut])
    elif skip_connection_type == 'sum':
        outputs = layers.add([residual, inputs])
    elif skip_connection_type == 'none':
        outputs = residual
    if return_skip:
        return outputs, skip
    else:
        return outputs
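A hedged example of calling the block, assuming SepConv_BN, _conv2d_same, and the keras.layers import from the same file are available; the 'conv' shortcut type is the one that exercises the projected-shortcut layers.add().

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(128, 128, 64))
out = _xception_block(inp, [128, 128, 128], 'entry_block2',
                      skip_connection_type='conv', stride=2)
model = Model(inp, out)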
Example #22
Source File: resnet50.py From Model-Playgrounds with MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of the 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a', 'b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
Example #23
Source File: resnet50.py From image-segmentation-keras with MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of the 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a', 'b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters

    if IMAGE_ORDERING == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING,
               name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
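A hedged usage example that applies to either identity_block variant above: because the shortcut is the raw input_tensor, the input channel count must equal filters3 for the final layers.add() to be valid.

from keras.layers import Input
from keras.models import Model

x_in = Input(shape=(56, 56, 256))
y = identity_block(x_in, 3, [64, 64, 256], stage=2, block='b')  # 256 channels in, 256 out
model = Model(x_in, y)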
Example #24
Source File: utilModelREDNet.py From document-image-binarization with GNU General Public License v3.0
def build_REDNet(nb_layers, input_size, nb_filters=32, k_size=3, dropout=0, strides=1, every=1):
    # -> CONV/FC -> BatchNorm -> ReLU (or other activation) -> Dropout -> CONV/FC ->
    # https://arxiv.org/pdf/1502.03167.pdf
    input_img = Input(shape=(input_size, input_size, 1))
    x = input_img

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    encoderLayers = [None] * nb_layers
    for i in range(nb_layers):
        x = Conv2D(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)
        encoderLayers[i] = x

    encoded = x

    for i in range(nb_layers):
        ind = nb_layers - i - 1
        x = layers.add([x, encoderLayers[ind]])
        x = Conv2DTranspose(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)

    decoded = Conv2D(1, kernel_size=k_size, strides=1, padding='same', activation='sigmoid')(x)

    autoencoder = Model(input_img, decoded)
    return autoencoder, encoded, decoded
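A sketch of building and compiling the network (the hyperparameters here are illustrative assumptions, not the project's published settings); with the default strides=1 the encoder feature maps keep their size, so the decoder's layers.add() skip connections line up directly.

autoencoder, encoded, decoded = build_REDNet(nb_layers=5, input_size=256)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()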
Example #25
Source File: model.py From edafa with MIT License
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters, kernel_size=1, padding='same',
               use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #     return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example #26
Source File: model.py From edafa with MIT License
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
                    rate=1, depth_activation=False, return_skip=False):
    """ Basic building block of modified Xception network

    Args:
        inputs: input tensor
        depth_list: number of filters in each SepConv layer. len(depth_list) == 3
        prefix: prefix before name
        skip_connection_type: one of {'conv', 'sum', 'none'}
        stride: stride at last depthwise conv
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        return_skip: flag to return additional tensor after 2 SepConvs for decoder
    """
    residual = inputs
    for i in range(3):
        residual = SepConv_BN(residual,
                              depth_list[i],
                              prefix + '_separable_conv{}'.format(i + 1),
                              stride=stride if i == 2 else 1,
                              rate=rate,
                              depth_activation=depth_activation)
        if i == 1:
            skip = residual
    if skip_connection_type == 'conv':
        shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
                                kernel_size=1, stride=stride)
        shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
        outputs = layers.add([residual, shortcut])
    elif skip_connection_type == 'sum':
        outputs = layers.add([residual, inputs])
    elif skip_connection_type == 'none':
        outputs = residual
    if return_skip:
        return outputs, skip
    else:
        return outputs
Example #27
Source File: model.py From edafa with MIT License
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN.
    Implements right "same" padding for even kernel sizes.

    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        epsilon: epsilon to use in BN layer
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride),
                        dilation_rate=(rate, rate), padding=depth_padding,
                        use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
Example #28
Source File: multiRes.py From kits19.MIScnn with GNU General Public License v3.0
def ResPath(filters, length, inp):
    '''
    ResPath

    Arguments:
        filters {int} -- number of filters
        length {int} -- length of ResPath
        inp {keras layer} -- input layer

    Returns:
        [keras layer] -- [output layer]
    '''
    shortcut = inp
    shortcut = conv3d_bn(shortcut, filters, 1, 1, 1, activation=None, padding='same')

    out = conv3d_bn(inp, filters, 3, 3, 3, activation='relu', padding='same')

    out = add([shortcut, out])
    out = Activation('relu')(out)
    out = BatchNormalization(axis=4)(out)

    for i in range(length - 1):
        shortcut = out
        shortcut = conv3d_bn(shortcut, filters, 1, 1, 1, activation=None, padding='same')

        out = conv3d_bn(out, filters, 3, 3, 3, activation='relu', padding='same')

        out = add([shortcut, out])
        out = Activation('relu')(out)
        out = BatchNormalization(axis=4)(out)

    return out
Example #29
Source File: multiRes.py From kits19.MIScnn with GNU General Public License v3.0
def MultiResBlock(U, inp, alpha=1.67):
    '''
    MultiRes Block

    Arguments:
        U {int} -- Number of filters in a corresponding UNet stage
        inp {keras layer} -- input layer

    Returns:
        [keras layer] -- [output layer]
    '''
    W = alpha * U

    shortcut = inp
    shortcut = conv3d_bn(shortcut, int(W * 0.167) + int(W * 0.333) + int(W * 0.5),
                         1, 1, 1, activation=None, padding='same')

    conv3x3 = conv3d_bn(inp, int(W * 0.167), 3, 3, 3, activation='relu', padding='same')
    conv5x5 = conv3d_bn(conv3x3, int(W * 0.333), 3, 3, 3, activation='relu', padding='same')
    conv7x7 = conv3d_bn(conv5x5, int(W * 0.5), 3, 3, 3, activation='relu', padding='same')

    out = concatenate([conv3x3, conv5x5, conv7x7], axis=4)
    out = BatchNormalization(axis=4)(out)

    out = add([shortcut, out])
    out = Activation('relu')(out)
    out = BatchNormalization(axis=4)(out)

    return out
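A usage sketch, assuming the conv3d_bn helper from the same file is in scope: these blocks operate on 5D tensors (batch, depth, height, width, channels), which is why BatchNormalization uses axis=4. The 1x1x1 shortcut convolution matches the concatenated channel count, so add() is well-defined.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 32, 1))
out = MultiResBlock(32, inp)  # channels follow the 1/6, 1/3, 1/2 split of W = 1.67 * 32
model = Model(inp, out)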
Example #30
Source File: lednet.py From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0
def apn_module(self, x):

    def right(x):
        x = layers.AveragePooling2D()(x)
        x = layers.Conv2D(self.classes, kernel_size=1, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.UpSampling2D(interpolation='bilinear')(x)
        return x

    def conv(x, filters, kernel_size, stride):
        x = layers.Conv2D(filters, kernel_size=kernel_size,
                          strides=(stride, stride), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        return x

    x_7 = conv(x, int(x.shape[-1]), 7, stride=2)
    x_5 = conv(x_7, int(x.shape[-1]), 5, stride=2)
    x_3 = conv(x_5, int(x.shape[-1]), 3, stride=2)

    x_3_1 = conv(x_3, self.classes, 3, stride=1)
    x_3_1_up = layers.UpSampling2D(interpolation='bilinear')(x_3_1)

    x_5_1 = conv(x_5, self.classes, 5, stride=1)
    x_3_5 = layers.add([x_5_1, x_3_1_up])
    x_3_5_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5)

    x_7_1 = conv(x_7, self.classes, 3, stride=1)
    x_3_5_7 = layers.add([x_7_1, x_3_5_up])
    x_3_5_7_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5_7)

    x_middle = conv(x, self.classes, 1, stride=1)
    x_middle = layers.multiply([x_3_5_7_up, x_middle])

    x_right = right(x)
    x_middle = layers.add([x_middle, x_right])

    return x_middle