Python tensorflow.python.keras.layers.Dropout() Examples
The following are 10 code examples of tensorflow.python.keras.layers.Dropout().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorflow.python.keras.layers, or try the search function.
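Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of the behavior all of them rely on: Dropout zeroes a random fraction of its inputs only when run in training mode, and rescales the surviving units by 1 / (1 - rate).

    import numpy as np
    from tensorflow.python.keras.layers import Dropout

    layer = Dropout(0.5)                     # drop half of the input units at random
    data = np.ones((1, 4), dtype="float32")

    print(layer(data, training=False))       # inference: [[1. 1. 1. 1.]], inputs pass through unchanged
    print(layer(data, training=True))        # training: survivors scaled to 2.0, the rest zeroed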
Example #1
Source File: rnn.py From cxplain with MIT License
def build(self, input_layer):
    last_layer = input_layer
    input_shape = K.int_shape(input_layer)
    if self.with_embedding:
        if input_shape[-1] != 1:
            raise ValueError("Only one feature (the index) can be used with embeddings, "
                             "i.e. the input shape should be (num_samples, length, 1). "
                             "The actual shape was: " + str(input_shape))

        last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                            output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
        last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                               input_length=input_shape[-2])(last_layer)

    for _ in range(self.num_layers):
        last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
        if self.with_bn:
            last_layer = BatchNormalization()(last_layer)
        if not np.isclose(self.p_dropout, 0):
            last_layer = Dropout(self.p_dropout)(last_layer)
    return last_layer
Example #2
Source File: gcn.py From GraphNeuralNetwork with MIT License
def build(self, input_shapes):
    if self.feature_less:
        input_dim = int(input_shapes[0][-1])
    else:
        assert len(input_shapes) == 2
        features_shape = input_shapes[0]
        input_dim = int(features_shape[-1])

    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=glorot_uniform(seed=self.seed),
                                  regularizer=l2(self.l2_reg),
                                  name='kernel')
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=Zeros(),
                                    name='bias')

    self.dropout = Dropout(self.dropout_rate, seed=self.seed)
    self.built = True
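Note that build() only constructs the Dropout sub-layer; the GCN layer applies it when it is called. As a rough sketch of that second half (illustrative, not the project's actual call() method), assuming the layer receives a (features, adjacency) pair:

    def call(self, inputs, training=None):
        features, adj = inputs
        features = self.dropout(features, training=training)  # mask is active only in training mode
        output = tf.matmul(tf.matmul(adj, features), self.kernel)
        if self.use_bias:
            output = output + self.bias
        return output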
Example #3
Source File: densenet.py From ImageAI with MIT License
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout

    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns:
        keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
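As a usage sketch, __conv_block is the unit DenseNet chains together, concatenating each block's output back onto its input. The shapes and hyperparameters below are illustrative, and the code assumes it runs in the same module as __conv_block:

    from tensorflow.python.keras.layers import Input, concatenate

    ip = Input(shape=(32, 32, 16))
    x = ip
    for _ in range(3):  # a tiny three-layer dense block
        cb = __conv_block(x, nb_filter=12, bottleneck=True, dropout_rate=0.2)
        x = concatenate([x, cb], axis=-1)  # dense connectivity: every layer sees all earlier features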
Example #4
Source File: baisc.py From FATE with Apache License 2.0
def _build_dropout(rate, noise_shape=None, seed=None, **kwargs):
    return layers.Dropout(rate, noise_shape=noise_shape, seed=seed, **kwargs)
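The main value of this thin wrapper is that it forwards noise_shape, which controls which axes share a single dropout mask. A small illustrative sketch for a (batch, timesteps, features) input:

    # Dimensions set to 1 in noise_shape are broadcast, so with (None, 1, 64) the
    # same mask is reused at every timestep: a dropped feature stays dropped for
    # the whole sequence. The rate and feature count here are illustrative.
    seq_dropout = _build_dropout(0.3, noise_shape=(None, 1, 64))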
Example #5
Source File: mlp.py From cxplain with MIT License
def build(self, input_layer):
    last_layer = input_layer
    for _ in range(self.num_layers):
        last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
        if self.with_bn:
            last_layer = BatchNormalization()(last_layer)
        if not np.isclose(self.p_dropout, 0):
            last_layer = Dropout(self.p_dropout)(last_layer)
    return last_layer
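The same Dense → BatchNormalization → Dropout stack can be written as a standalone functional-API model rather than a class method; all hyperparameter values below are illustrative:

    from tensorflow.python.keras.layers import Input, Dense, BatchNormalization, Dropout
    from tensorflow.python.keras.models import Model

    inputs = Input(shape=(20,))
    x = inputs
    for _ in range(2):                       # num_layers = 2
        x = Dense(64, activation='relu')(x)  # num_units = 64
        x = BatchNormalization()(x)          # with_bn = True
        x = Dropout(0.1)(x)                  # p_dropout = 0.1
    model = Model(inputs, x)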
Example #6
Source File: small_cnn.py From camera-trap-classifier with MIT License
def architecture(inputs):
    """ Architecture of model """
    conv1 = Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)
    max1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(32, (3, 3), activation='relu')(max1)
    max2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(64, (3, 3), activation='relu')(max2)
    max3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    flat1 = Flatten()(max3)
    dense1 = Dense(64, activation='relu')(flat1)
    drop1 = Dropout(0.5)(dense1)
    return drop1
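Note that architecture() returns the post-dropout feature tensor, not a finished model. A hedged sketch of wiring it into a classifier (the input size and class count are illustrative, not the project's values):

    from tensorflow.python.keras.layers import Input, Dense
    from tensorflow.python.keras.models import Model

    inputs = Input(shape=(64, 64, 3))                    # illustrative image size
    features = architecture(inputs)
    outputs = Dense(10, activation='softmax')(features)  # illustrative 10-class head
    model = Model(inputs=inputs, outputs=outputs)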
Example #7
Source File: graphsage.py From GraphNeuralNetwork with MIT License
def build(self, input_shapes):
    self.neigh_weights = self.add_weight(shape=(self.input_dim, self.units),
                                         initializer=glorot_uniform(seed=self.seed),
                                         regularizer=l2(self.l2_reg),
                                         name="neigh_weights")
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),  # trailing comma: shape must be a tuple, not a bare int
                                    initializer=Zeros(),
                                    name='bias_weight')

    self.dropout = Dropout(self.dropout_rate)
    self.built = True
Example #8
Source File: gat.py From GraphNeuralNetwork with MIT License
def build(self, input_shape):
    X, A = input_shape
    embedding_size = int(X[-1])
    self.weight = self.add_weight(name='weight',
                                  shape=[embedding_size, self.att_embedding_size * self.head_num],
                                  dtype=tf.float32,
                                  regularizer=l2(self.l2_reg),
                                  initializer=tf.keras.initializers.glorot_uniform())
    self.att_self_weight = self.add_weight(name='att_self_weight',
                                           shape=[1, self.head_num, self.att_embedding_size],
                                           dtype=tf.float32,
                                           regularizer=l2(self.l2_reg),
                                           initializer=tf.keras.initializers.glorot_uniform())
    self.att_neighs_weight = self.add_weight(name='att_neighs_weight',
                                             shape=[1, self.head_num, self.att_embedding_size],
                                             dtype=tf.float32,
                                             regularizer=l2(self.l2_reg),
                                             initializer=tf.keras.initializers.glorot_uniform())
    if self.use_bias:
        self.bias_weight = self.add_weight(name='bias',
                                           shape=[1, self.head_num, self.att_embedding_size],
                                           dtype=tf.float32,
                                           initializer=Zeros())

    self.in_dropout = Dropout(self.dropout_rate)
    self.feat_dropout = Dropout(self.dropout_rate)
    self.att_dropout = Dropout(self.dropout_rate)

    # Be sure to call this somewhere!
    super(GATLayer, self).build(input_shape)
Example #9
Source File: RTSNNet.py From alpha-zero-general with MIT License
def __init__(self, game, encoder):
    """
    NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2
    and reduced nnet_args.num_channels

    :param game: game configuration
    :param encoder: Encoder, used to encode game boards
    """
    from rts.src.config_class import CONFIG

    # game params
    self.board_x, self.board_y, num_encoders = game.getBoardSize()
    self.action_size = game.getActionSize()
    # num_encoders = CONFIG.nnet_args.encoder.num_encoders
    num_encoders = encoder.num_encoders

    # Neural Net
    self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

    x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size x board_x x board_y x num_encoders
    h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size x board_x x board_y x num_channels
    h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size x board_x x board_y x num_channels
    h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size x (board_x-2) x (board_y-2) x num_channels
    h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size x (board_x-4) x (board_y-4) x num_channels
    h_conv4_flat = Flatten()(h_conv4)
    s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
    s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
    self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
    self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

    self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
    self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'],
                       optimizer=Adam(CONFIG.nnet_args.lr))
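Because the model has two heads (pi and v), the compiled losses expect a pair of targets. A usage sketch with random placeholder data; nnet stands for a hypothetical, already-constructed instance of the class above, and all shapes are illustrative:

    import numpy as np

    boards = np.random.rand(16, nnet.board_x, nnet.board_y, 4).astype("float32")  # 4 = hypothetical encoder count
    target_pis = np.random.rand(16, nnet.action_size)
    target_pis /= target_pis.sum(axis=1, keepdims=True)  # policy targets must sum to 1
    target_vs = np.random.uniform(-1, 1, size=(16, 1))   # value targets in [-1, 1]

    nnet.model.fit(boards, [target_pis, target_vs], batch_size=8, epochs=1)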
Example #10
Source File: pbt_tune_cifar10_with_keras.py From ray with Apache License 2.0
def _build_model(self, input_shape):
    # Note: the input_shape argument is unused; the CIFAR-10 shape is hardcoded below.
    x = Input(shape=(32, 32, 3))
    y = x
    y = Convolution2D(filters=64, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = Convolution2D(filters=64, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

    y = Convolution2D(filters=128, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = Convolution2D(filters=128, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

    y = Convolution2D(filters=256, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = Convolution2D(filters=256, kernel_size=3, strides=1, padding="same",
                      activation="relu", kernel_initializer="he_normal")(y)
    y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

    y = Flatten()(y)
    y = Dropout(self.config.get("dropout", 0.5))(y)  # dropout rate is a tunable hyperparameter
    y = Dense(units=10, activation="softmax", kernel_initializer="he_normal")(y)

    model = Model(inputs=x, outputs=y, name="model1")
    return model
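Here the dropout rate is read from self.config, which is exactly the knob Population Based Training perturbs between trials. A hedged sketch of the scheduler side (the interval and sampling range are illustrative, not Ray's example values):

    import random
    from ray.tune.schedulers import PopulationBasedTraining

    pbt = PopulationBasedTraining(
        time_attr="training_iteration",
        perturbation_interval=10,  # illustrative
        hyperparam_mutations={
            # resampled value is read back in _build_model via self.config.get("dropout", 0.5)
            "dropout": lambda: random.uniform(0.2, 0.8),
        })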