Python tensorflow.keras.layers.Conv2D() Examples
The following are 30 code examples of tensorflow.keras.layers.Conv2D(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out the other available functions and classes of the tensorflow.keras.layers module.
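Before the project-specific examples, here is a minimal, self-contained sketch of a basic Conv2D call. The layer sizes and the 28x28x1 input shape are illustrative assumptions, not taken from any of the projects below:

from tensorflow.keras import layers, models

model = models.Sequential()
# Conv2D expects channels-last input by default: (batch, height, width, channels)
model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu',
                        input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.summary()  # prints output shapes and parameter counts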
Example #1
Source File: model.py From DexiNed with MIT License
def __init__(self, out_features, **kwargs):
    super(_DenseLayer, self).__init__(**kwargs)
    k_reg = None if w_decay is None else l2(w_decay)
    self.layers = []
    self.layers.append(tf.keras.Sequential([
        layers.ReLU(),  # the first ReLU may be unnecessary
        layers.Conv2D(
            filters=out_features, kernel_size=(3, 3), strides=(1, 1),
            padding='same', use_bias=True,
            kernel_initializer=weight_init, kernel_regularizer=k_reg),
        layers.BatchNormalization(),
        layers.ReLU(),
        layers.Conv2D(
            filters=out_features, kernel_size=(3, 3), strides=(1, 1),
            padding='same', use_bias=True,
            kernel_initializer=weight_init, kernel_regularizer=k_reg),
        layers.BatchNormalization(),
    ]))
Example #2
Source File: layers.py From RLs with Apache License 2.0
def ConvLayer(conv_function=Conv2D,
              filters=[32, 64, 64],
              kernels=[[8, 8], [4, 4], [3, 3]],
              strides=[[4, 4], [2, 2], [1, 1]],
              padding='valid',
              activation='relu'):
    '''
    Params:
        conv_function: the convolution function
        filters: list of filter counts for all hidden conv layers
        kernels: list of kernel sizes for all hidden conv layers
        strides: list of strides for all hidden conv layers
        padding: padding mode
        activation: activation function
    Return:
        A Sequential of convolution layers, followed by Flatten.
    '''
    layers = Sequential([conv_function(filters=f, kernel_size=k, strides=s,
                                       padding=padding, activation=activation)
                         for f, k, s in zip(filters, kernels, strides)])
    layers.add(Flatten())
    return layers
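A possible usage sketch for ConvLayer (assuming the Sequential/Conv2D/Flatten imports used by the snippet plus tensorflow as tf; the 84x84x4 Atari-style input is an assumption):

import tensorflow as tf

encoder = ConvLayer()  # the defaults reproduce the classic DQN-style conv stack
features = encoder(tf.zeros((1, 84, 84, 4)))  # -> a flattened feature batch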
Example #3
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self, filters, kernel_size,
             norm_type="instance", pad_type="constant", **kwargs):
    super(ResBlock, self).__init__(name="ResBlock")
    padding = (kernel_size - 1) // 2
    padding = (padding, padding)
    self.model = tf.keras.models.Sequential()
    self.model.add(get_padding(pad_type, padding))
    self.model.add(Conv2D(filters, kernel_size))
    self.model.add(get_norm(norm_type))
    self.model.add(ReLU())
    self.model.add(get_padding(pad_type, padding))
    self.model.add(Conv2D(filters, kernel_size))
    self.model.add(get_norm(norm_type))
    self.add = Add()
Example #4
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self, filters=64, lrelu_alpha=0.2,
             pad_type="constant", norm_type="batch", **kwargs):
    super(StridedConv, self).__init__(name="StridedConv")
    self.model = tf.keras.models.Sequential()
    self.model.add(get_padding(pad_type, (1, 1)))
    self.model.add(Conv2D(filters, 3, strides=(2, 2)))
    self.model.add(LeakyReLU(lrelu_alpha))
    self.model.add(get_padding(pad_type, (1, 1)))
    self.model.add(Conv2D(filters * 2, 3))
    self.model.add(get_norm(norm_type))
    self.model.add(LeakyReLU(lrelu_alpha))
Example #5
Source File: BeplerContactPredictor.py From tape-neurips2019 with MIT License
def __init__(self,
             input_name: str = 'encoder_output',
             output_name: str = 'contact_prob'):
    super().__init__()
    self._input_name = input_name
    self._output_name = output_name

    def concat_pairs(tensor):
        input_mul = tensor[:, :, None] * tensor[:, None, :]
        input_sub = tf.abs(tensor[:, :, None] - tensor[:, None, :])
        output = tf.concat((input_mul, input_sub), -1)
        return output

    self.get_pairwise_feature_vector = Lambda(concat_pairs)

    self.predict_contact_map = Stack()
    self.predict_contact_map.add(Conv2D(32, 1, use_bias=True, padding='same', activation='relu'))
    self.predict_contact_map.add(Conv2D(1, 7, use_bias=True, padding='same', activation='linear'))
Example #6
Source File: se.py From keras-squeeze-excite-network with MIT License
def spatial_squeeze_excite_block(input_tensor):
    """ Create a spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor

    Returns: a Keras tensor

    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                kernel_initializer='he_normal')(input_tensor)

    x = multiply([input_tensor, se])
    return x
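A possible usage sketch (the feature-map shape is hypothetical; assumes the same Keras imports as the snippet):

from tensorflow.keras.layers import Input

feature_map = Input(shape=(32, 32, 64))
recalibrated = spatial_squeeze_excite_block(feature_map)  # same shape, spatially reweighted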
Example #7
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self,
             filters,  # NOTE: will be filters // 2
             norm_type="instance",
             pad_type="constant",
             **kwargs):
    super(BasicShuffleUnitV2, self).__init__(name="BasicShuffleUnitV2")
    filters //= 2
    self.model = tf.keras.models.Sequential([
        Conv2D(filters, 1, use_bias=False),
        get_norm(norm_type),
        ReLU(),
        DepthwiseConv2D(3, padding='same', use_bias=False),
        get_norm(norm_type),
        Conv2D(filters, 1, use_bias=False),
        get_norm(norm_type),
        ReLU(),
    ])
Example #8
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self, filters, kernel_size, stride=1,
             norm_type="instance", pad_type="constant", **kwargs):
    super(ConvBlock, self).__init__(name="ConvBlock")
    padding = (kernel_size - 1) // 2
    padding = (padding, padding)
    self.model = tf.keras.models.Sequential()
    self.model.add(get_padding(pad_type, padding))
    self.model.add(Conv2D(filters, kernel_size, stride))
    self.model.add(get_padding(pad_type, padding))
    self.model.add(Conv2D(filters, kernel_size))
    self.model.add(get_norm(norm_type))
    self.model.add(ReLU())
Example #9
Source File: dropout_unet.py From bcnn with MIT License
def up_stage(inputs, skip, filters, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling2D()(inputs)
    up = Conv2D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)

    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)

    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(merge)
    conv = GroupNormalization()(conv)
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)

    conv = SpatialDropout2D(0.5)(conv, training=True)
    return conv
Example #10
Source File: train.py From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                        include_top=False, alpha=ALPHA)

    # to freeze layers
    for layer in model.layers:
        layer.trainable = trainable

    out = model.layers[-1].output

    x = Conv2D(4, kernel_size=3)(out)
    x = Reshape((4,), name="coords")(x)

    y = GlobalAveragePooling2D()(out)
    y = Dense(CLASSES, name="classes", activation="softmax")(y)

    return Model(inputs=model.input, outputs=[x, y])
Example #11
Source File: atari_model.py From tf2rl with MIT License
def __init__(self, state_shape, action_dim, units=None,
             name="AtariCategoricalActorCritic"):
    tf.keras.Model.__init__(self, name=name)
    self.dist = Categorical(dim=action_dim)
    self.action_dim = action_dim

    self.conv1 = Conv2D(32, kernel_size=(8, 8), strides=(4, 4),
                        padding='valid', activation='relu')
    self.conv2 = Conv2D(64, kernel_size=(4, 4), strides=(2, 2),
                        padding='valid', activation='relu')
    self.conv3 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1),
                        padding='valid', activation='relu')
    self.flat = Flatten()
    self.fc1 = Dense(512, activation='relu')
    self.prob = Dense(action_dim, activation='softmax')
    self.v = Dense(1, activation="linear")

    # build the model's weights by running a dummy forward pass
    self(tf.constant(np.zeros(shape=(1,) + state_shape, dtype=np.float32)))
Example #12
Source File: train.py From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                        include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2: the l2 penalty coeff * weight^2 has gradient
    # 2 * coeff * weight, so a coefficient of WEIGHT_DECAY / 2 yields
    # an effective decay of WEIGHT_DECAY.
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight))  # in tf2.0: lambda: regularizer(weight)

    return model
Example #13
Source File: run.py From polyaxon-examples with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(layers.Conv2D(args.conv1_size, (3, 3),
                            activation=args.conv_activation,
                            input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))
    model.summary()
    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])
    return model
Example #14
Source File: convert_gl2tf2_conv2d.py From imgclsmob with MIT License
def __init__(self, data_format="channels_last", **kwargs):
    super(TF2Model, self).__init__(**kwargs)
    padding = (3, 3)
    if isinstance(padding, int):
        padding = (padding, padding)
    if is_channels_first(data_format):
        self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
    else:
        self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
    self.conv = nn.Conv2D(
        filters=64,
        kernel_size=(7, 7),
        strides=2,
        padding="valid",
        data_format=data_format,
        dilation_rate=1,
        use_bias=False,
        name="conv")
Example #15
Source File: convert_gl2tf2_conv2d.py From imgclsmob with MIT License
def __init__(self, data_format="channels_last", **kwargs):
    super(TF2Model2, self).__init__(**kwargs)
    padding = (3, 3)
    if isinstance(padding, int):
        padding = (padding, padding)
    self.pad = nn.ZeroPadding2D(
        padding=padding,
        data_format=data_format)
    self.conv = nn.Conv2D(
        filters=64,
        kernel_size=(7, 7),
        strides=2,
        padding="valid",
        data_format=data_format,
        dilation_rate=1,
        use_bias=False,
        name="conv")
Example #16
Source File: inception_resnet_v1.py From TripletLossFace with MIT License
def conv2d_bn(x, filters, kernel_size, strides=1, padding='same',
              activation='relu', use_bias=False, name=None):
    x = Conv2D(filters, kernel_size,
               strides=strides, padding=padding,
               use_bias=use_bias, name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = _generate_layer_name('BatchNorm', prefix=name)
        x = BatchNormalization(axis=bn_axis, momentum=0.995, epsilon=0.001,
                               scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = _generate_layer_name('Activation', prefix=name)
        x = Activation(activation, name=ac_name)(x)
    return x
Example #17
Source File: cyclegan-7.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic encoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU
    """
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x
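A possible usage sketch chaining encoder layers into a downsampling stack (the input shape and filter counts are illustrative):

from tensorflow.keras.layers import Input

inputs = Input(shape=(256, 256, 3))
e1 = encoder_layer(inputs, filters=32)  # 256x256 -> 128x128
e2 = encoder_layer(e1, filters=64)      # 128x128 -> 64x64
e3 = encoder_layer(e2, filters=128)     # 64x64 -> 32x32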
Example #18
Source File: CNN.py From nn_builder with MIT License
def create_and_append_layer(self, layer, list_to_append_layer_to,
                            activation=None, output_layer=False):
    """Creates and appends a layer to the list provided"""
    layer_name = layer[0].lower()
    assert layer_name in self.valid_cnn_hidden_layer_types, \
        "Layer name {} not valid, use one of {}".format(
            layer_name, self.valid_cnn_hidden_layer_types)
    if layer_name == "conv":
        list_to_append_layer_to.extend([Conv2D(filters=layer[1], kernel_size=layer[2],
                                               strides=layer[3], padding=layer[4],
                                               activation=activation,
                                               kernel_initializer=self.initialiser_function)])
    elif layer_name == "maxpool":
        list_to_append_layer_to.extend([MaxPool2D(pool_size=(layer[1], layer[1]),
                                                  strides=(layer[2], layer[2]),
                                                  padding=layer[3])])
    elif layer_name == "avgpool":
        list_to_append_layer_to.extend([AveragePooling2D(pool_size=(layer[1], layer[1]),
                                                         strides=(layer[2], layer[2]),
                                                         padding=layer[3])])
    elif layer_name == "linear":
        list_to_append_layer_to.extend([Dense(layer[1], activation=activation,
                                              kernel_initializer=self.initialiser_function)])
    else:
        raise ValueError("Wrong layer name")
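Judging from the indexing above, each layer spec is a list whose first element names the layer type, e.g. ["conv", filters, kernel_size, stride, padding] or ["maxpool", pool_size, stride, padding]. A hypothetical call (the builder instance and the spec values are assumptions):

hidden_layers = []
builder.create_and_append_layer(["conv", 32, 3, 1, "same"], hidden_layers, activation="relu")
builder.create_and_append_layer(["maxpool", 2, 2, "valid"], hidden_layers)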
Example #19
Source File: model.py From Advanced-Deep-Learning-with-Keras with MIT License
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
    with optional MaxPooling2D.
    """
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               kernel_initializer='he_normal',
               name="conv_" + postfix,
               padding='same')(inputs)
    x = BatchNormalization(name="bn_" + postfix)(x)
    x = Activation('relu', name='relu_' + postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool' + postfix)(x)
    return x
Example #20
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self,
             filters,  # NOTE: will be filters // 2
             norm_type="instance",
             pad_type="constant",
             **kwargs):
    super(DownShuffleUnitV2, self).__init__(name="DownShuffleUnitV2")
    filters //= 2
    self.r_model = tf.keras.models.Sequential([
        Conv2D(filters, 1, use_bias=False),
        get_norm(norm_type),
        ReLU(),
        DepthwiseConv2D(3, 2, 'same', use_bias=False),
        get_norm(norm_type),
        Conv2D(filters, 1, use_bias=False),
    ])
    self.l_model = tf.keras.models.Sequential([
        DepthwiseConv2D(3, 2, 'same', use_bias=False),
        get_norm(norm_type),
        Conv2D(filters, 1, use_bias=False),
    ])
    self.bn_act = tf.keras.models.Sequential([
        get_norm(norm_type),
        ReLU(),
    ])
Example #21
Source File: model.py From DexiNed with MIT License
def __init__(self, mid_features, out_features=None, stride=(1, 1),
             use_bn=True, use_act=True, **kwargs):
    super(DoubleConvBlock, self).__init__(**kwargs)
    self.use_bn = use_bn
    self.use_act = use_act
    out_features = mid_features if out_features is None else out_features
    k_reg = None if w_decay is None else l2(w_decay)

    self.conv1 = layers.Conv2D(
        filters=mid_features, kernel_size=(3, 3), strides=stride,
        padding='same', use_bias=True,
        kernel_initializer=weight_init, kernel_regularizer=k_reg)
    self.bn1 = layers.BatchNormalization()
    self.conv2 = layers.Conv2D(
        filters=out_features, kernel_size=(3, 3), strides=(1, 1),
        padding='same', use_bias=True,
        kernel_initializer=weight_init, kernel_regularizer=k_reg)
    self.bn2 = layers.BatchNormalization()
    self.relu = layers.ReLU()
Example #22
Source File: convert_test.py From tf-encrypted with Apache License 2.0
def _keras_conv2d_core(shape=None, data=None):
    assert shape is None or data is None
    if shape is None:
        shape = data.shape
    init = tf.keras.initializers.RandomNormal(seed=1)

    model = Sequential()
    c2d = Conv2D(
        2,
        (3, 3),
        data_format="channels_last",
        use_bias=False,
        kernel_initializer=init,
        input_shape=shape[1:],
    )
    model.add(c2d)

    if data is None:
        data = np.random.uniform(size=shape)
    out = model.predict(data)
    return model, out
Example #23
Source File: cifar_tf_example.py From ray with Apache License 2.0
def create_model(config):
    import tensorflow as tf
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    # initiate RMSprop optimizer
    opt = tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy",
        optimizer=opt,
        metrics=["accuracy"])
    return model
Example #24
Source File: dcgan-mnist-4.2.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def build_discriminator(inputs):
    """Build a Discriminator Model

    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with BN, so it is not used here,
    unlike in [1] or the original paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs
    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator = Model(inputs, x, name='discriminator')
    return discriminator
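A possible usage sketch (assuming an Input import; the 28x28x1 shape matches the MNIST setting of this source file):

from tensorflow.keras.layers import Input

inputs = Input(shape=(28, 28, 1), name='discriminator_input')
discriminator = build_discriminator(inputs)
discriminator.summary()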
Example #25
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0
def __init__(self, filters, kernel_size,
             norm_type="instance", pad_type="constant", **kwargs):
    super(FlatConv, self).__init__(name="FlatConv")
    padding = (kernel_size - 1) // 2
    padding = (padding, padding)
    self.model = tf.keras.models.Sequential()
    self.model.add(get_padding(pad_type, padding))
    self.model.add(Conv2D(filters, kernel_size))
    self.model.add(get_norm(norm_type))
    self.model.add(ReLU())
Example #26
Source File: mnist_cifar_models.py From CROWN-IBP with BSD 2-Clause "Simplified" License
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret
Example #27
Source File: cyclegan-7.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU

    Arguments: (partial)
        inputs (tensor): the decoder layer input
        paired_inputs (tensor): the encoder layer output
            provided by U-Net skip connection &
            concatenated to inputs.
    """
    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x
Example #28
Source File: vgg.py From Advanced-Deep-Learning-with-Keras with MIT License
def make_layers(cfg,
                inputs,
                batch_norm=True,
                in_channels=1):
    """Helper function to ease the creation of VGG network model

    Arguments:
        cfg (list): Summarizes the network layer configuration
        inputs (tensor): Input from previous layer
        batch_norm (Bool): Whether to use batch norm
            between Conv2D and ReLU
        in_channels (int): Number of input channels
    """
    x = inputs
    for layer in cfg:
        if layer == 'M':
            x = MaxPooling2D()(x)
        elif layer == 'A':
            x = AveragePooling2D(pool_size=3)(x)
        else:
            x = Conv2D(layer,
                       kernel_size=3,
                       padding='same',
                       kernel_initializer='he_normal'
                       )(x)
            if batch_norm:
                x = BatchNormalization()(x)
            x = Activation('relu')(x)
    return x
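A possible usage sketch (the cfg below is a hypothetical VGG-style configuration: integers are Conv2D filter counts, 'M' is max pooling, 'A' is average pooling):

from tensorflow.keras.layers import Input

inputs = Input(shape=(28, 28, 1))
cfg = [64, 64, 'M', 128, 128, 'A']
outputs = make_layers(cfg, inputs)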
Example #29
Source File: model.py From Advanced-Deep-Learning-with-Keras with MIT License
def conv2d(inputs,
           filters=32,
           kernel_size=3,
           strides=1,
           name=None):
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  kernel_initializer='he_normal',
                  name=name,
                  padding='same')
    return conv(inputs)
Example #30
Source File: convert_test.py From tf-encrypted with Apache License 2.0
def split_edge_case_builder(input_shape, filters=2, kernel_size=3):
    init = tf.keras.initializers.RandomNormal(seed=1)
    x = tf.keras.Input(shape=input_shape[1:])
    y1, y2 = tf.keras.layers.Lambda(
        lambda tensor: tf.split(tensor, num_or_size_splits=2, axis=-1)
    )(x)
    y = tf.keras.layers.Conv2D(
        filters, kernel_size, kernel_initializer=init, use_bias=True, padding="same"
    )(y2)
    y = tf.keras.layers.Concatenate(axis=-1)([y1, y])
    return tf.keras.Model(x, y)