Python tensorflow.keras.layers.Reshape() Examples
The following are 30 code examples of tensorflow.keras.layers.Reshape(), drawn from open-source projects. You can go to the original project or source file via the attribution above each example, and you may also want to check out the other available functions and classes of the tensorflow.keras.layers module.
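Before the project examples, a minimal standalone sketch (not from any of the projects below) of what the layer does: Reshape rearranges every dimension except the batch axis, the target shape must preserve the per-sample element count, and at most one target dimension may be -1, which is then inferred.

import tensorflow as tf
from tensorflow.keras.layers import Reshape

x = tf.zeros((2, 6, 4))          # batch of 2 samples, each of shape (6, 4)
y = Reshape((3, 8))(x)           # 6*4 == 3*8; the batch axis is untouched
print(y.shape)                   # (2, 3, 8)
z = Reshape((-1, 2))(x)          # one dimension may be -1 and is inferred
print(z.shape)                   # (2, 12, 2)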
Example #1
Source File: decoders.py From AirSim-Drone-Racing-VAE-Imitation with MIT License
def create_model(self):
    print('[ImgDecoder] Starting create_model')
    dense = Dense(units=1024, name='p_img_dense')
    reshape = Reshape((1, 1, 1024))  # for 64x64 img

    deconv1 = Conv2DTranspose(filters=128, kernel_size=4, strides=1, padding='valid', activation='relu')
    deconv2 = Conv2DTranspose(filters=64, kernel_size=5, strides=1, padding='valid', activation='relu', dilation_rate=3)
    deconv3 = Conv2DTranspose(filters=64, kernel_size=6, strides=1, padding='valid', activation='relu', dilation_rate=2)
    deconv4 = Conv2DTranspose(filters=32, kernel_size=5, strides=2, padding='valid', activation='relu', dilation_rate=1)
    deconv5 = Conv2DTranspose(filters=16, kernel_size=5, strides=1, padding='valid', activation='relu', dilation_rate=1)
    # deconv6 = Conv2DTranspose(filters=8, kernel_size=6, strides=2, padding='valid', activation='relu')
    deconv7 = Conv2DTranspose(filters=3, kernel_size=6, strides=1, padding='valid', activation='tanh')

    self.network = tf.keras.Sequential([
        dense,
        reshape,
        deconv1,
        deconv2,
        deconv3,
        deconv4,
        deconv5,
        deconv7], name='p_img')
    print('[ImgDecoder] Done with create_model')
Example #2
Source File: osnet.py From keras_imagenet with MIT License
def get_aggregation_gate(in_filters, reduction=16):
    """Get the "aggregation gate (AG)" op.

    # Arguments
        reduction: channel reduction for the hidden layer.

    # Returns
        The AG op (a models.Sequential module).
    """
    gate = models.Sequential()
    gate.add(layers.GlobalAveragePooling2D())
    gate.add(layers.Dense(in_filters // reduction, use_bias=False))
    gate.add(layers.BatchNormalization())
    gate.add(layers.Activation('relu'))
    gate.add(layers.Dense(in_filters))
    gate.add(layers.Activation('sigmoid'))
    gate.add(layers.Reshape((1, 1, -1)))  # reshape as (H, W, C)
    return gate
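A hedged usage sketch for the gate above (not from the project; the feature-map shape is an assumption, and the example's layers/models aliases for tensorflow.keras are presumed in scope): because the gate ends in Reshape((1, 1, -1)), its (batch, 1, 1, C) output broadcasts over the full H x W feature map when the two are multiplied.

import tensorflow as tf
feats = tf.random.normal((2, 16, 16, 64))   # hypothetical NHWC feature map
gate = get_aggregation_gate(in_filters=64)
scaled = feats * gate(feats)                # (2,16,16,64) * (2,1,1,64) broadcasts
print(scaled.shape)                         # (2, 16, 16, 64)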
Example #3
Source File: keras_layers.py From DeepPavlov with Apache License 2.0
def expand_tile(units, axis):
    """Expand and tile tensor along given axis.

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: axis along which to expand and tile. Must be 1 or 2.
    """
    assert axis in (1, 2)
    n_time_steps = K.int_shape(units)[1]
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        expanded = Reshape(target_shape=((1,) + K.int_shape(units)[1:]))(units)
    else:
        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
    return K.tile(expanded, repetitions)
Example #4
Source File: yolo3_nano.py From keras-YOLOv3-model-set with MIT License
def _fca_block(inputs, reduct_ratio, block_id):
    in_channels = inputs.shape.as_list()[-1]
    #in_shapes = inputs.shape.as_list()[1:3]
    reduct_channels = int(in_channels // reduct_ratio)
    prefix = 'fca_block_{}_'.format(block_id)
    x = GlobalAveragePooling2D(name=prefix + 'average_pooling')(inputs)
    x = Dense(reduct_channels, activation='relu', name=prefix + 'fc1')(x)
    x = Dense(in_channels, activation='sigmoid', name=prefix + 'fc2')(x)
    # prefix the reshape name too, so repeated blocks do not collide on layer names
    x = Reshape((1, 1, in_channels), name=prefix + 'reshape')(x)
    x = Multiply(name=prefix + 'multiply')([x, inputs])
    return x
Example #5
Source File: mobilenet_v3.py From keras-YOLOv3-model-set with MIT License
def _se_block(inputs, filters, se_ratio, prefix):
    x = GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
    if K.image_data_format() == 'channels_first':
        x = Reshape((filters, 1, 1))(x)
    else:
        x = Reshape((1, 1, filters))(x)
    x = Conv2D(_depth(filters * se_ratio),
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv')(x)
    x = ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = Conv2D(filters,
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv_1')(x)
    x = Activation(hard_sigmoid)(x)
    #if K.backend() == 'theano':
    #    # For the Theano backend, we have to explicitly make
    #    # the excitation weights broadcastable.
    #    x = Lambda(
    #        lambda br: K.pattern_broadcast(br, [True, True, True, False]),
    #        output_shape=lambda input_shape: input_shape,
    #        name=prefix + 'squeeze_excite/broadcast')(x)
    x = Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x
Example #6
Source File: train.py From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA)

    # to freeze layers
    for layer in model.layers:
        layer.trainable = trainable

    out = model.layers[-1].output

    x = Conv2D(4, kernel_size=3)(out)
    x = Reshape((4,), name="coords")(x)

    y = GlobalAveragePooling2D()(out)
    y = Dense(CLASSES, name="classes", activation="softmax")(y)

    return Model(inputs=model.input, outputs=[x, y])
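One subtlety worth spelling out (a sketch, not project code): Reshape((4,)) only works if the preceding valid 3x3 convolution collapses the spatial map to 1x1, i.e. if IMAGE_SIZE / 32 == 3. The backbone output shape below assumes IMAGE_SIZE = 96, which satisfies that; the project's actual constant may differ.

import tensorflow as tf
x = tf.zeros((1, 3, 3, 1280))                    # hypothetical backbone output for a 96x96 input
x = tf.keras.layers.Conv2D(4, kernel_size=3)(x)  # valid padding -> (1, 1, 1, 4)
x = tf.keras.layers.Reshape((4,))(x)             # -> (1, 4): a flat box-coordinate vector
print(x.shape)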
Example #7
Source File: test_generator_evaluator.py From deepchem with MIT License
def test_compute_model_performance_multitask_classifier(self):
    n_data_points = 20
    n_features = 1
    n_tasks = 2
    n_classes = 2

    X = np.ones(shape=(n_data_points // 2, n_features)) * -1
    X1 = np.ones(shape=(n_data_points // 2, n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y1 = np.concatenate((class_0, class_1))
    y2 = np.concatenate((class_1, class_0))
    y = np.stack([y1, y2], axis=1)
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_data_points // 2, n_features))
    dense = layers.Dense(n_tasks * n_classes)(features)
    logits = layers.Reshape((n_tasks, n_classes))(dense)
    output = layers.Softmax()(logits)
    keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
    model = dc.models.KerasModel(
        keras_model,
        dc.models.losses.SoftmaxCrossEntropy(),
        output_types=['prediction', 'loss'],
        learning_rate=0.01,
        batch_size=n_data_points)

    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")

    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    # Loosening atol to see if tests stop failing sporadically
    assert np.all(np.isclose(scores, [1.0, 1.0], atol=0.50))
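The Dense-then-Reshape pattern in this test is the standard way to build a multitask classification head: one flat logit vector is regrouped into (tasks, classes) so softmax runs per task. A self-contained sketch (names and sizes are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

n_tasks, n_classes = 2, 2
inp = tf.keras.Input(shape=(10,))
logits = layers.Dense(n_tasks * n_classes)(inp)        # one flat vector of task logits
logits = layers.Reshape((n_tasks, n_classes))(logits)  # (batch, n_tasks, n_classes)
probs = layers.Softmax()(logits)                       # softmax over the class axis
model = tf.keras.Model(inp, probs)
print(model.output_shape)                              # (None, 2, 2)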
Example #8
Source File: SEResNeXt.py From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excitation_layer(self, x, out_dim):
    '''
    SE module performs inter-channel weighting.
    '''
    squeeze = GlobalAveragePooling2D()(x)

    excitation = Dense(units=out_dim // self.ratio)(squeeze)
    excitation = self.activation(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = self.activation(excitation, 'sigmoid')
    excitation = Reshape((1, 1, out_dim))(excitation)

    scale = multiply([x, excitation])

    return scale
Example #9
Source File: squeeze_excitation.py From DeepPoseKit with Apache License 2.0
def channel_squeeze_excite_block(input, ratio=0.25):
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    # K.int_shape works for tf.keras tensors (which have no _keras_shape attribute)
    filters = K.int_shape(init)[channel_axis]
    cse_shape = (1, 1, filters)

    cse = layers.GlobalAveragePooling2D()(init)
    cse = layers.Reshape(cse_shape)(cse)

    ratio_filters = int(np.round(filters * ratio))
    if ratio_filters < 1:
        ratio_filters += 1
    cse = layers.Conv2D(
        ratio_filters,
        (1, 1),
        padding="same",
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)
    cse = layers.BatchNormalization()(cse)
    cse = layers.Conv2D(
        filters,
        (1, 1),
        activation="sigmoid",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)

    if K.image_data_format() == "channels_first":
        cse = layers.Permute((3, 1, 2))(cse)

    cse = layers.Multiply()([init, cse])
    return cse
Example #10
Source File: yamnet.py From models with Apache License 2.0
def yamnet(features):
    """Define the core YAMNet model in Keras."""
    net = layers.Reshape(
        (params.PATCH_FRAMES, params.PATCH_BANDS, 1),
        input_shape=(params.PATCH_FRAMES, params.PATCH_BANDS))(features)
    for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):
        net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters)(net)
    net = layers.GlobalAveragePooling2D()(net)
    logits = layers.Dense(units=params.NUM_CLASSES, use_bias=True)(net)
    predictions = layers.Activation(
        name=params.EXAMPLE_PREDICTIONS_LAYER_NAME,
        activation=params.CLASSIFIER_ACTIVATION)(logits)
    return predictions
Example #11
Source File: model.py From EfficientDet with Apache License 2.0
def __init__(self, width, depth, num_anchors=9, separable_conv=True, freeze_bn=False,
             detect_quadrangle=False, **kwargs):
    super(BoxNet, self).__init__(**kwargs)
    self.width = width
    self.depth = depth
    self.num_anchors = num_anchors
    self.separable_conv = separable_conv
    self.detect_quadrangle = detect_quadrangle
    num_values = 9 if detect_quadrangle else 4
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'bias_initializer': 'zeros',
    }
    if separable_conv:
        kernel_initializer = {
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }
        options.update(kernel_initializer)
        self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options)
                      for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_anchors * num_values,
                                           name=f'{self.name}/box-predict', **options)
    else:
        kernel_initializer = {
            'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
        }
        options.update(kernel_initializer)
        self.convs = [layers.Conv2D(filters=width, name=f'{self.name}/box-{i}', **options)
                      for i in range(depth)]
        self.head = layers.Conv2D(filters=num_anchors * num_values,
                                  name=f'{self.name}/box-predict', **options)
    self.bns = [
        [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}')
         for j in range(3, 8)]
        for i in range(depth)]
    # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
    #             for i in range(depth)]
    self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
    self.reshape = layers.Reshape((-1, num_values))
    self.level = 0
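The Reshape((-1, num_values)) stored here is presumably applied to each pyramid level's prediction map in the class's call(). A standalone sketch of that flattening (the 8x8 level size is hypothetical): a (H, W, num_anchors * 4) map becomes one row per anchor box.

import tensorflow as tf
pred = tf.zeros((1, 8, 8, 9 * 4))               # (batch, H, W, num_anchors*4)
boxes = tf.keras.layers.Reshape((-1, 4))(pred)  # one row of 4 values per anchor
print(boxes.shape)                              # (1, 576, 4), since 8*8*9 == 576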
Example #12
Source File: train_keras_model.py From gym-2048 with MIT License
def build_model(board_size=4, board_layers=16, outputs=4, filters=64, residual_blocks=4):
    # Functional API model
    inputs = layers.Input(shape=(board_size * board_size * board_layers,))
    x = layers.Reshape((board_size, board_size, board_layers))(inputs)

    # Initial convolutional block
    x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    # residual blocks
    for i in range(residual_blocks):
        # x at the start of a block
        temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
        temp_x = layers.BatchNormalization()(temp_x)
        temp_x = layers.Activation('relu')(temp_x)
        temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(temp_x)
        temp_x = layers.BatchNormalization()(temp_x)
        x = layers.add([x, temp_x])
        x = layers.Activation('relu')(x)

    # policy head
    x = layers.Conv2D(filters=2, kernel_size=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Flatten()(x)
    predictions = layers.Dense(outputs, activation='softmax')(x)

    # Create model
    return models.Model(inputs=inputs, outputs=predictions)
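A hypothetical smoke test for the function above (assuming the script's layers/models imports): the Reshape turns the flat board encoding back into a 4x4x16 "image" for the convolutions.

model = build_model()
print(model.input_shape)    # (None, 256): 4*4*16 flattened board planes
print(model.output_shape)   # (None, 4): softmax over the four moves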
Example #13
Source File: conv_models.py From bootcamp with Apache License 2.0
def __init__(self, batch_input_shape=(None, NUM_FRAMES, NUM_FBANKS, 1),
             include_softmax=False,
             num_speakers_softmax=None):
    self.include_softmax = include_softmax
    if self.include_softmax:
        assert num_speakers_softmax > 0
    self.clipped_relu_count = 0

    # http://cs231n.github.io/convolutional-networks/
    # conv weights
    # #params = ks * ks * nb_filters * num_channels_input

    # Conv128-s
    # 5*5*128*128/2+128
    # ks*ks*nb_filters*channels/strides+bias(=nb_filters)

    # take 100 ms -> 4 frames.
    # if signal is 3 seconds, then take 100ms per 100ms and average out this network.
    # 8*8 = 64 features.

    # used to share all the layers across the inputs

    # num_frames = K.shape() - do it dynamically after.
    inputs = Input(batch_shape=batch_input_shape, name='input')
    x = self.cnn_component(inputs)

    x = Reshape((-1, 2048))(x)
    # Temporal average layer. axis=1 is time.
    x = Lambda(lambda y: K.mean(y, axis=1), name='average')(x)
    if include_softmax:
        logger.info('Including a Dropout layer to reduce overfitting.')
        # used for softmax because the dataset we pre-train on might be too small. easy to overfit.
        x = Dropout(0.5)(x)
    x = Dense(512, name='affine')(x)
    if include_softmax:
        # Those weights are just when we train on softmax.
        x = Dense(num_speakers_softmax, activation='softmax')(x)
    else:
        # Does not contain any weights.
        x = Lambda(lambda y: K.l2_normalize(y, axis=1), name='ln')(x)
    self.m = Model(inputs, x, name='ResCNN')
Example #14
Source File: mobilenet_v3_small.py From TF.Keras-Commonly-used-models with Apache License 2.0
def build(self, plot=False):
    """build MobileNetV3 Small.

    # Arguments
        plot: Boolean, whether to plot the model.

    # Returns
        model: Model, model.
    """
    inputs = Input(shape=self.shape)

    x = self._conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')

    x = self._bottleneck(x, 16, (3, 3), e=16, s=2, squeeze=True, nl='RE')
    x = self._bottleneck(x, 24, (3, 3), e=72, s=2, squeeze=False, nl='RE')
    x = self._bottleneck(x, 24, (3, 3), e=88, s=1, squeeze=False, nl='RE')
    x = self._bottleneck(x, 40, (5, 5), e=96, s=2, squeeze=True, nl='HS')
    x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 48, (5, 5), e=120, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 48, (5, 5), e=144, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 96, (5, 5), e=288, s=2, squeeze=True, nl='HS')
    x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')

    x = self._conv_block(x, 576, (1, 1), strides=(1, 1), nl='HS')
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 576))(x)

    x = Conv2D(1280, (1, 1), padding='same')(x)
    x = self._return_activation(x, 'HS')

    x = Conv2D(self.n_class, (1, 1), padding='same', activation='softmax')(x)
    output = Reshape((self.n_class,))(x)

    model = Model(inputs, output)

    #if plot:
    #    plot_model(model, to_file='images/MobileNetv3_small.png', show_shapes=True)

    return model
Example #15
Source File: ConvDEC.py From DEC-DA with MIT License
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
    model = Sequential()
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'
    model.add(InputLayer(input_shape))
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))
    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))
    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))
    model.add(Flatten())
    model.add(Dense(units=filters[3], name='embedding'))
    model.add(Dense(units=filters[2] * int(input_shape[0] / 8) * int(input_shape[0] / 8), activation='relu'))
    model.add(Reshape((int(input_shape[0] / 8), int(input_shape[0] / 8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))
    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))
    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))

    encoder = Model(inputs=model.input, outputs=model.get_layer('embedding').output)
    return model, encoder
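A usage sketch (not from the project; it assumes the script's keras imports are available): the decoder's Dense + Reshape pair exactly undoes the encoder's Flatten, so the autoencoder reconstructs the input shape.

autoencoder, encoder = CAE(input_shape=(28, 28, 1))
autoencoder.compile(optimizer='adam', loss='mse')
print(encoder.output_shape)      # (None, 10): the embedding
print(autoencoder.output_shape)  # (None, 28, 28, 1): the reconstruction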
Example #16
Source File: train.py From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA)

    # to freeze layers
    for layer in model.layers:
        layer.trainable = trainable

    x = model.layers[-1].output
    x = Conv2D(4, kernel_size=3, name="coords")(x)
    x = Reshape((4,))(x)

    return Model(inputs=model.input, outputs=x)
Example #17
Source File: mobilenet_base.py From TF.Keras-Commonly-used-models with Apache License 2.0
def _squeeze(self, inputs):
    """Squeeze and Excitation.

    This function defines a squeeze structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
    """
    input_channels = int(inputs.shape[-1])

    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)

    return x
Example #18
Source File: se.py From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """Create a channel-wise squeeze-excite block.

    Args:
        input_tensor: input Keras tensor
        ratio: channel reduction ratio for the hidden Dense layer

    Returns:
        a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
Example #19
Source File: se.py From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excite_block(input, ratio=16):
    '''Create a channel-wise squeeze-excite block.

    Args:
        input: input tensor
        ratio: channel reduction ratio for the hidden Dense layer

    Returns:
        a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    # K.int_shape works for tf.keras tensors (which have no _keras_shape attribute)
    filters = K.int_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
Example #20
Source File: chemnet_models.py From deepchem with MIT License
def _build_graph(self):
    smile_images = Input(shape=self.input_shape)
    stem = chemnet_layers.Stem(self.base_filters)(smile_images)

    inceptionA_out = self.build_inception_module(inputs=stem, type="A")
    reductionA_out = chemnet_layers.ReductionA(self.base_filters)(inceptionA_out)

    inceptionB_out = self.build_inception_module(inputs=reductionA_out, type="B")
    reductionB_out = chemnet_layers.ReductionB(self.base_filters)(inceptionB_out)

    inceptionC_out = self.build_inception_module(inputs=reductionB_out, type="C")
    avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)

    if self.mode == "classification":
        logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
        logits = Reshape((self.n_tasks, self.n_classes))(logits)
        if self.n_classes == 2:
            output = Activation(activation='sigmoid')(logits)
            loss = SigmoidCrossEntropy()
        else:
            output = Softmax()(logits)
            loss = SoftmaxCrossEntropy()
        outputs = [output, logits]
        output_types = ['prediction', 'loss']
    else:
        output = Dense(self.n_tasks * 1)(avg_pooling_out)
        output = Reshape((self.n_tasks, 1))(output)
        outputs = [output]
        output_types = ['prediction']
        loss = L2Loss()

    model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
    return model, loss, output_types
Example #21
Source File: load_keras_model.py From checkmate with Apache License 2.0
def testBertModel(num_layers, heads, input_size):
    hidden_size = input_size[1]
    intermediate_size = 4 * hidden_size
    seq_length = input_size[0]

    inputs = keras.Input(shape=(input_size))
    x = inputs
    for i in range(num_layers):
        query = Dense(hidden_size, name="query_{}".format(i))(x)
        key = Dense(hidden_size, name="key_{}".format(i))(x)
        value = Dense(hidden_size, name="value_{}".format(i))(x)
        query = Reshape((heads, seq_length, hidden_size // heads))(query)
        key = Reshape((heads, hidden_size // heads, seq_length))(key)
        value = Reshape((heads, seq_length, hidden_size // heads))(value)
        acts = Lambda(lambda x: tf.matmul(x[0], x[1]), name="acts_{}".format(i))([query, key])
        fin = Lambda(lambda x: tf.matmul(x[0], x[1]), name="fin_{}".format(i))([acts, value])
        fin = Reshape((seq_length, hidden_size))(fin)
        # layer.append(TFBertSelfAttention(config, name="layer_{}".format(i)))
        att = Dense(hidden_size, name="att_{}".format(i))(fin)
        relu = Activation("relu", name="relu0_{}".format(i))(att)
        x = LayerNormalization(name="f_att_{}".format(i))(relu + x)
        inter = Dense(intermediate_size, name="inter_{}".format(i))(x)
        relu1 = Activation("relu", name="relu1_{}".format(i))(inter)
        shrink = Dense(hidden_size, name="shrink_{}".format(i))(relu1)
        relu2 = Activation("relu", name="relu2_{}".format(i))(shrink)
        x = LayerNormalization(name="layer_out_{}".format(i))(x + relu2)
    return keras.Model(inputs=inputs, outputs=x)
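One caveat worth noting about this benchmark model: standard multi-head attention splits heads with a reshape followed by a transpose, whereas Reshape alone only regroups elements in row-major order. A small standalone sketch of the difference between Reshape and Permute:

import tensorflow as tf
x = tf.reshape(tf.range(6), (1, 2, 3))     # one sample of shape (2, 3)
r = tf.keras.layers.Reshape((3, 2))(x)     # regroups elements: [[0 1] [2 3] [4 5]]
p = tf.keras.layers.Permute((2, 1))(x)     # transposes axes:   [[0 3] [1 4] [2 5]]
print(r.numpy(), p.numpy())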
Example #22
Source File: model.py From EfficientDet with Apache License 2.0
def __init__(self, width, depth, num_classes=20, num_anchors=9, separable_conv=True,
             freeze_bn=False, **kwargs):
    super(ClassNet, self).__init__(**kwargs)
    self.width = width
    self.depth = depth
    self.num_classes = num_classes
    self.num_anchors = num_anchors
    self.separable_conv = separable_conv
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
    }
    if self.separable_conv:
        kernel_initializer = {
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }
        options.update(kernel_initializer)
        self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros',
                                             name=f'{self.name}/class-{i}', **options)
                      for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
                                           bias_initializer=PriorProbability(probability=0.01),
                                           name=f'{self.name}/class-predict', **options)
    else:
        kernel_initializer = {
            'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
        }
        options.update(kernel_initializer)
        self.convs = [layers.Conv2D(filters=width, bias_initializer='zeros',
                                    name=f'{self.name}/class-{i}', **options)
                      for i in range(depth)]
        self.head = layers.Conv2D(filters=num_classes * num_anchors,
                                  bias_initializer=PriorProbability(probability=0.01),
                                  name='class-predict', **options)
    self.bns = [
        [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}')
         for j in range(3, 8)]
        for i in range(depth)]
    # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/class-{i}-bn-{j}') for j in range(3, 8)]
    #             for i in range(depth)]
    self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
    self.reshape = layers.Reshape((-1, num_classes))
    self.activation = layers.Activation('sigmoid')
    self.level = 0
Example #23
Source File: mobilenet_v3_large.py From TF.Keras-Commonly-used-models with Apache License 2.0
def build(self, plot=False):
    """build MobileNetV3 Large.

    # Arguments
        plot: Boolean, whether to plot the model.

    # Returns
        model: Model, model.
    """
    inputs = Input(shape=self.shape)

    x = self._conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')

    x = self._bottleneck(x, 16, (3, 3), e=16, s=1, squeeze=False, nl='RE')
    x = self._bottleneck(x, 24, (3, 3), e=64, s=2, squeeze=False, nl='RE')
    x = self._bottleneck(x, 24, (3, 3), e=72, s=1, squeeze=False, nl='RE')
    x = self._bottleneck(x, 40, (5, 5), e=72, s=2, squeeze=True, nl='RE')
    x = self._bottleneck(x, 40, (5, 5), e=120, s=1, squeeze=True, nl='RE')
    x = self._bottleneck(x, 40, (5, 5), e=120, s=1, squeeze=True, nl='RE')
    x = self._bottleneck(x, 80, (3, 3), e=240, s=2, squeeze=False, nl='HS')
    x = self._bottleneck(x, 80, (3, 3), e=200, s=1, squeeze=False, nl='HS')
    x = self._bottleneck(x, 80, (3, 3), e=184, s=1, squeeze=False, nl='HS')
    x = self._bottleneck(x, 80, (3, 3), e=184, s=1, squeeze=False, nl='HS')
    x = self._bottleneck(x, 112, (3, 3), e=480, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 112, (3, 3), e=672, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 160, (5, 5), e=672, s=2, squeeze=True, nl='HS')
    x = self._bottleneck(x, 160, (5, 5), e=960, s=1, squeeze=True, nl='HS')
    x = self._bottleneck(x, 160, (5, 5), e=960, s=1, squeeze=True, nl='HS')

    x = self._conv_block(x, 960, (1, 1), strides=(1, 1), nl='HS')
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 960))(x)

    x = Conv2D(1280, (1, 1), padding='same')(x)
    x = self._return_activation(x, 'HS')

    x = Conv2D(self.n_class, (1, 1), padding='same', activation='softmax')(x)
    output = Reshape((self.n_class,))(x)

    model = Model(inputs, output)

    #if plot:
    #    plot_model(model, to_file='images/MobileNetv3_large.png', show_shapes=True)

    return model
Example #24
Source File: resnet.py From keras-tuner with Apache License 2.0
def block3(x, filters, kernel_size=3, stride=1, groups=32, conv_shortcut=True, name=None):
    """A residual block.

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut is True:
        shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,
                                 use_bias=False, name=name + '_0_conv')(x)
        shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                             name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
                               use_bias=False, name=name + '_2_conv')(x)
    x_shape = backend.int_shape(x)[1:-1]
    x = layers.Reshape(x_shape + (groups, c, c))(x)
    output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None
    x = layers.Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
                      output_shape=output_shape, name=name + '_2_reduce')(x)
    x = layers.Reshape(x_shape + (filters,))(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)

    x = layers.Add(name=name + '_add')([shortcut, x])
    x = layers.Activation('relu', name=name + '_out')(x)
    return x
Example #25
Source File: networks.py From brainstorm with MIT License
def randflow_model(img_shape,
                   model,
                   model_name='randflow_model',
                   flow_sigma=None,
                   flow_amp=None,
                   blur_sigma=5,
                   interp_mode='linear',
                   indexing='xy',
                   ):
    n_dims = len(img_shape) - 1

    x_in = Input(img_shape, name='img_input_randwarp')

    if n_dims == 3:
        flow = MaxPooling3D(2)(x_in)
        flow = MaxPooling3D(2)(flow)
        blur_sigma = int(np.ceil(blur_sigma / 4.))
        flow_shape = tuple([int(s / 4) for s in img_shape[:-1]] + [n_dims])
    else:
        flow = x_in
        flow_shape = img_shape[:-1] + (n_dims,)

    # random flow field
    if flow_amp is None:
        flow = RandFlow(name='randflow', img_shape=flow_shape, blur_sigma=blur_sigma,
                        flow_sigma=flow_sigma)(flow)
    elif flow_sigma is None:
        flow = RandFlow_Uniform(name='randflow', img_shape=flow_shape, blur_sigma=blur_sigma,
                                flow_amp=flow_amp)(flow)

    if n_dims == 3:
        flow = Reshape(flow_shape)(flow)
        # upsample with linear interpolation
        flow = Lambda(interp_upsampling)(flow)
        flow = Lambda(interp_upsampling, output_shape=img_shape[:-1] + (n_dims,))(flow)
        flow = Reshape(img_shape[:-1] + (n_dims,), name='randflow_out')(flow)
    else:
        flow = Reshape(img_shape[:-1] + (n_dims,), name='randflow_out')(flow)

    x_warped = SpatialTransformer(interp_method=interp_mode, name='densespatialtransformer_img',
                                  indexing=indexing)([x_in, flow])

    if model is not None:
        model_outputs = model(x_warped)
        if not isinstance(model_outputs, list):
            model_outputs = [model_outputs]
    else:
        model_outputs = [x_warped, flow]

    return Model(inputs=[x_in], outputs=model_outputs, name=model_name)
Example #26
Source File: cgan-mnist-4.3.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def build_discriminator(inputs, labels, image_size):
    """Build a Discriminator Model.

    Inputs are concatenated after the Dense layer.
    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with BN, so it is not used here,
    unlike in the DCGAN paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)
        labels (Layer): Input layer for one-hot vector to condition the inputs
        image_size: Target size of one side (assuming square image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs

    y = Dense(image_size * image_size)(labels)
    y = Reshape((image_size, image_size, 1))(y)
    x = concatenate([x, y])

    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    # input is conditioned by labels
    discriminator = Model([inputs, labels], x, name='discriminator')
    return discriminator
Example #27
Source File: cgan-mnist-4.3.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def build_generator(inputs, labels, image_size):
    """Build a Generator Model.

    Inputs are concatenated before the Dense layer.
    Stack of BN-ReLU-Conv2DTranspose to generate fake images.
    Output activation is sigmoid instead of tanh as in the original DCGAN;
    sigmoid converges easily.

    Arguments:
        inputs (Layer): Input layer of the generator (the z-vector)
        labels (Layer): Input layer for one-hot vector to condition the inputs
        image_size: Target size of one side (assuming square image)

    Returns:
        generator (Model): Generator Model
    """
    image_resize = image_size // 4
    # network parameters
    kernel_size = 5
    layer_filters = [128, 64, 32, 1]

    x = concatenate([inputs, labels], axis=1)
    x = Dense(image_resize * image_resize * layer_filters[0])(x)
    x = Reshape((image_resize, image_resize, layer_filters[0]))(x)

    for filters in layer_filters:
        # first two convolution layers use strides = 2
        # the last two use strides = 1
        if filters > layer_filters[-2]:
            strides = 2
        else:
            strides = 1
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            padding='same')(x)

    x = Activation('sigmoid')(x)
    # input is conditioned by labels
    generator = Model([inputs, labels], x, name='generator')
    return generator
Example #28
Source File: dcgan-mnist-4.2.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def build_generator(inputs, image_size):
    """Build a Generator Model.

    Stack of BN-ReLU-Conv2DTranspose to generate fake images.
    Output activation is sigmoid instead of tanh as in [1];
    sigmoid converges easily.

    Arguments:
        inputs (Layer): Input layer of the generator (the z-vector)
        image_size (tensor): Target size of one side (assuming square image)

    Returns:
        generator (Model): Generator Model
    """
    image_resize = image_size // 4
    # network parameters
    kernel_size = 5
    layer_filters = [128, 64, 32, 1]

    x = Dense(image_resize * image_resize * layer_filters[0])(inputs)
    x = Reshape((image_resize, image_resize, layer_filters[0]))(x)

    for filters in layer_filters:
        # first two convolution layers use strides = 2
        # the last two use strides = 1
        if filters > layer_filters[-2]:
            strides = 2
        else:
            strides = 1
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            padding='same')(x)

    x = Activation('sigmoid')(x)
    generator = Model(inputs, x, name='generator')
    return generator
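A hypothetical wiring of the function above, following its docstring (the 100-dim z-vector is an assumption, and the script's keras imports are presumed in scope). The Dense + Reshape pair projects the latent vector into a 7x7x128 seed image, which two stride-2 transposed convolutions upsample to 28x28.

from tensorflow.keras.layers import Input
z = Input(shape=(100,), name='z_input')        # latent vector
generator = build_generator(z, image_size=28)
print(generator.output_shape)                  # (None, 28, 28, 1)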
Example #29
Source File: core.py From crepe with MIT License
def build_and_load_model(model_capacity):
    """
    Build the CNN model and load the weights.

    Parameters
    ----------
    model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
        String specifying the model capacity, which determines the model's
        capacity multiplier to 4 (tiny), 8 (small), 16 (medium), 24 (large),
        or 32 (full). 'full' uses the model size specified in the paper,
        and the others use a reduced number of filters in each convolutional
        layer, resulting in a smaller model that is faster to evaluate at the
        cost of slightly reduced pitch estimation accuracy.

    Returns
    -------
    model : tensorflow.keras.models.Model
        The pre-trained keras model loaded in memory
    """
    from tensorflow.keras.layers import Input, Reshape, Conv2D, BatchNormalization
    from tensorflow.keras.layers import MaxPool2D, Dropout, Permute, Flatten, Dense
    from tensorflow.keras.models import Model

    if models[model_capacity] is None:
        capacity_multiplier = {
            'tiny': 4, 'small': 8, 'medium': 16, 'large': 24, 'full': 32
        }[model_capacity]

        layers = [1, 2, 3, 4, 5, 6]
        filters = [n * capacity_multiplier for n in [32, 4, 4, 4, 8, 16]]
        widths = [512, 64, 64, 64, 64, 64]
        strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]

        x = Input(shape=(1024,), name='input', dtype='float32')
        y = Reshape(target_shape=(1024, 1, 1), name='input-reshape')(x)

        for l, f, w, s in zip(layers, filters, widths, strides):
            y = Conv2D(f, (w, 1), strides=s, padding='same',
                       activation='relu', name="conv%d" % l)(y)
            y = BatchNormalization(name="conv%d-BN" % l)(y)
            y = MaxPool2D(pool_size=(2, 1), strides=None, padding='valid',
                          name="conv%d-maxpool" % l)(y)
            y = Dropout(0.25, name="conv%d-dropout" % l)(y)

        y = Permute((2, 1, 3), name="transpose")(y)
        y = Flatten(name="flatten")(y)
        y = Dense(360, activation='sigmoid', name="classifier")(y)

        model = Model(inputs=x, outputs=y)

        package_dir = os.path.dirname(os.path.realpath(__file__))
        filename = "model-{}.h5".format(model_capacity)
        model.load_weights(os.path.join(package_dir, filename))
        model.compile('adam', 'binary_crossentropy')

        models[model_capacity] = model

    return models[model_capacity]
Example #30
Source File: model.py From CRNN.tf2 with MIT License
def build_model(num_classes, image_width=None, channels=1):
    """Build a CNN-RNN model."""

    def vgg_style(input_tensor):
        """The original feature extraction structure from the CRNN paper.

        Related paper: https://ieeexplore.ieee.org/abstract/document/7801919
        """
        x = layers.Conv2D(filters=64, kernel_size=3, padding='same',
                          activation='relu')(input_tensor)
        x = layers.MaxPool2D(pool_size=2, padding='same')(x)
        x = layers.Conv2D(filters=128, kernel_size=3, padding='same',
                          activation='relu')(x)
        x = layers.MaxPool2D(pool_size=2, padding='same')(x)
        x = layers.Conv2D(filters=256, kernel_size=3, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(filters=256, kernel_size=3, padding='same',
                          activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 1), padding='same')(x)
        x = layers.Conv2D(filters=512, kernel_size=3, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Conv2D(filters=512, kernel_size=3, padding='same',
                          activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 1), padding='same')(x)
        x = layers.Conv2D(filters=512, kernel_size=2)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        return x

    img_input = keras.Input(shape=(32, image_width, channels))
    x = vgg_style(img_input)
    x = layers.Reshape((-1, 512))(x)

    x = layers.Bidirectional(layers.LSTM(units=256, return_sequences=True))(x)
    x = layers.Bidirectional(layers.LSTM(units=256, return_sequences=True))(x)
    x = layers.Dense(units=num_classes)(x)

    return keras.Model(inputs=img_input, outputs=x, name='CRNN')
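A usage sketch showing why Reshape((-1, 512)) is the CNN-to-RNN bridge here (num_classes=80 and image_width=100 are illustrative, and the script's keras imports are assumed): the pooling schedule collapses the 32-pixel height to 1 while only roughly quartering the width, so each remaining column of the feature map becomes one LSTM time step.

model = build_model(num_classes=80, image_width=100)
print(model.output_shape)   # (None, 24, 80): 24 time steps from a width of 100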