Python keras.layers.Convolution3D() Examples
The following are 16 code examples of keras.layers.Convolution3D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
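Most of the examples below use the Keras 1.x layer signature, Convolution3D(nb_filter, kernel_dim1, kernel_dim2, kernel_dim3, subsample=..., border_mode=...); in Keras 2.x the same layer is Conv3D(filters, kernel_size, strides=..., padding=...). As a rough orientation, here is a minimal sketch of the two styles side by side (assumes Keras 2.x and is not taken from any of the projects below):

# Minimal sketch, assuming Keras 2.x; not from any project listed on this page.
from keras.layers import Input, Conv3D
from keras.models import Model

inputs = Input(shape=(32, 32, 32, 1))  # depth, height, width, channels
# Keras 1.x style used in most examples: Convolution3D(32, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')
# Keras 2.x equivalent:
x = Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', activation='relu')(inputs)
Model(inputs, x).summary()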

Example #1
Source File: auto_classifier_model.py From aitom with GNU General Public License v3.0 | 6 votes |
def conv_block(x, nb_filter, nb0, nb1, nb2, border_mode='same', subsample=(1, 1, 1), bias=True, batch_norm=False):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    x = Convolution3D(nb_filter, nb0, nb1, nb2, subsample=subsample, border_mode=border_mode, bias=bias)(x)

    if batch_norm:
        assert not bias
        x = BatchNormalization(axis=channel_axis)(x)
    else:
        assert bias

    x = Activation('relu')(x)
    return x
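This helper is written against the Keras 1.x API (K.image_dim_ordering, subsample, border_mode, bias). A hedged sketch of an equivalent block under Keras 2.x (an assumption about porting, not part of the aitom project) would be:

# Hypothetical Keras 2.x port of the conv_block above (an assumption, not aitom code).
from keras.layers import Conv3D, BatchNormalization, Activation
from keras import backend as K

def conv_block_v2(x, nb_filter, nb0, nb1, nb2, padding='same', strides=(1, 1, 1), bias=True, batch_norm=False):
    # channel axis depends on the image data format ('channels_first' vs 'channels_last')
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv3D(nb_filter, (nb0, nb1, nb2), strides=strides, padding=padding, use_bias=bias)(x)
    if batch_norm:
        assert not bias
        x = BatchNormalization(axis=channel_axis)(x)
    else:
        assert bias
    x = Activation('relu')(x)
    return x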
Example #2
Source File: subdivide.py From aitom with GNU General Public License v3.0 | 6 votes |
def dsrff3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    # modified VGG19 architecture
    bn_axis = 3
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    m = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)

    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(m)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)

    m = Flatten(name='flatten')(m)
    m = Dense(512, activation='relu', name='fc1')(m)
    m = Dense(512, activation='relu', name='fc2')(m)
    m = Dense(num_labels, activation='softmax')(m)

    mod = KM.Model(inputs=inputs, outputs=m)
    return mod
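A hypothetical usage sketch follows; the image size, label count, and compile settings are assumptions, not taken from the aitom project:

model = dsrff3D(image_size=32, num_labels=5)   # assumed sizes
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()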
Example #3
Source File: seg_util.py From aitom with GNU General Public License v3.0 | 6 votes |
def conv_block(x, nb_filter, nb0, nb1, nb2, border_mode='same', subsample=(1, 1, 1), bias=True, batch_norm=False):
    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten, Activation
    from keras.layers.normalization import BatchNormalization
    from keras import backend as K

    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    x = Convolution3D(nb_filter, nb0, nb1, nb2, subsample=subsample, border_mode=border_mode, bias=bias)(x)

    if batch_norm:
        assert not bias
        x = BatchNormalization(axis=channel_axis)(x)
    else:
        assert bias

    x = Activation('relu')(x)
    return x
Example #4
Source File: m10a.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
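Note that merge([x, shortcut], mode='sum') is the Keras 1.x functional merge, which was removed in Keras 2.x; a ported version (an assumption about porting, not part of this project) would use the add layer instead:

from keras.layers import add   # Keras 2.x
x = add([x, shortcut])         # replaces merge([x, shortcut], mode='sum')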
Example #5
Source File: sd01a.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
Example #6
Source File: m10a.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
Example #7
Source File: sd01a.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
Example #8
Source File: subdivide.py From aitom with GNU General Public License v3.0 | 5 votes |
def inception3D(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    m = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='valid', input_shape=())(inputs)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=None, border_mode='same')(m)

    # inception module 0
    branch1x1 = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)

    branch3x3_reduce = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch3x3 = Convolution3D(64, 3, 3, 3, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch3x3_reduce)

    branch5x5_reduce = Convolution3D(16, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(m)
    branch5x5 = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch5x5_reduce)

    branch_pool = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='same')(m)
    branch_pool_proj = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(branch_pool)

    # m = merge([branch1x1, branch3x3, branch5x5, branch_pool_proj], mode='concat', concat_axis=-1)
    from keras.layers import concatenate
    m = concatenate([branch1x1, branch3x3, branch5x5, branch_pool_proj], axis=-1)

    m = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='valid')(m)
    m = Flatten()(m)
    m = Dropout(0.7)(m)

    # explicitly separate the Dense and Activation layers in order to project to the structural feature space
    m = Dense(num_labels, activation='linear')(m)
    m = Activation('softmax')(m)

    mod = KM.Model(input=inputs, output=m)
    return mod
Example #9
Source File: m10a.py From kaggle-lung-cancer with Apache License 2.0 | 5 votes |
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution3D(16, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=4, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()

    return model
Example #10
Source File: sd01a.py From kaggle-lung-cancer with Apache License 2.0 | 5 votes |
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution3D(16, 5, 5, 5, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(4, 4, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()

    return model
Example #11
Source File: sd01a.py From kaggle-lung-cancer with Apache License 2.0 | 5 votes |
def define_model():
    img_input = Input(shape=(32, 32, 64, 1))

    x = Convolution3D(16, 5, 5, 5, subsample=(1, 1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=16, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=32, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=3, subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(4, 4, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')

    return model
Example #12
Source File: deepdrug3d.py From DeepDrug3D with GNU General Public License v3.0 | 5 votes |
def build():
    model = Sequential()
    # Conv layer 1
    model.add(Convolution3D(
        input_shape=(14, 32, 32, 32),
        filters=64,
        kernel_size=5,
        padding='valid',     # Padding method
        data_format='channels_first',
    ))
    model.add(LeakyReLU(alpha=0.1))
    # Dropout 1
    model.add(Dropout(0.2))
    # Conv layer 2
    model.add(Convolution3D(
        filters=64,
        kernel_size=3,
        padding='valid',     # Padding method
        data_format='channels_first',
    ))
    model.add(LeakyReLU(alpha=0.1))
    # Maxpooling 1
    model.add(MaxPooling3D(
        pool_size=(2, 2, 2),
        strides=None,
        padding='valid',     # Padding method
        data_format='channels_first'
    ))
    # Dropout 2
    model.add(Dropout(0.4))
    # FC 1
    model.add(Flatten())
    model.add(Dense(128))  # TODO changed to 64 for the CAM
    model.add(LeakyReLU(alpha=0.1))
    # Dropout 3
    model.add(Dropout(0.4))
    # Fully connected layer 2 to shape (2) for 2 classes
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
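A hypothetical usage sketch follows; the optimizer and loss are assumptions, not taken from the DeepDrug3D project:

model = build()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # expects channels_first input of shape (14, 32, 32, 32)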
Example #13
Source File: network3d.py From Hyperspectral-Image-Spatial-Super-Resolution-via-3D-Full-Convolutional-Neural-Network with BSD 3-Clause "New" or "Revised" License | 5 votes |
def srcnn(input_shape=(33, 33, 110, 1)):
    # for ROSIS sensor
    model = Sequential()
    model.add(Convolution3D(64, 9, 9, 7, input_shape=input_shape, activation='relu'))
    model.add(Convolution3D(32, 1, 1, 1, activation='relu'))
    model.add(Convolution3D(9, 1, 1, 1, activation='relu'))
    model.add(Convolution3D(1, 5, 5, 3))
    model.compile(Adam(lr=0.00005), 'mse')
    return model
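A hypothetical smoke test with random data; the shapes follow the default input_shape and the 'valid' convolutions above, which reduce 33 x 33 x 110 inputs to 21 x 21 x 102 outputs, and it only runs under Keras 1.x, where this positional Convolution3D signature exists:

import numpy as np
model = srcnn()
x = np.random.rand(2, 33, 33, 110, 1).astype('float32')   # two random input cubes
y = np.random.rand(2, 21, 21, 102, 1).astype('float32')   # matching target shape after the 'valid' convolutions
model.fit(x, y, nb_epoch=1, batch_size=2)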
Example #14
Source File: auto_classifier_model.py From aitom with GNU General Public License v3.0 | 4 votes |
def auto_classifier_model(img_shape, encoding_dim=128, NUM_CHANNELS=1, num_of_class=2):
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)
    mask_shape = (None, num_of_class)

    # Use relu activation for the hidden layers to guarantee that non-negative outputs are passed to the
    # max pooling layer. In that case, as long as the output layer has a linear activation, the network can
    # still accommodate negative image intensities, which are simply shifted back using the bias term.
    input_img = Input(shape=input_shape[1:])
    mask = Input(shape=mask_shape[1:])

    x = input_img
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    encoder_conv_shape = [_.value for _ in x.get_shape()]  # x.get_shape() returns a list of tensorflow.python.framework.tensor_shape.Dimension objects
    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    encoder = Model(inputs=input_img, outputs=encoded)

    x = BatchNormalization()(x)
    x = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(num_of_class, activation='softmax')(x)
    prob = x  # classifier output
    classifier = Model(inputs=input_img, outputs=prob)

    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(np.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = Convolution3D(1, (3, 3, 3), activation='linear', padding='same')(x)
    decoded = x  # autoencoder output
    decoder = Model(inputs=input_img_decoder, outputs=decoded)

    autoencoder = Sequential()
    for l in encoder.layers:
        autoencoder.add(l)
    last = None
    for l in decoder.layers:
        last = l
        autoencoder.add(l)

    decoded = autoencoder(input_img)

    auto_classifier = Model(inputs=input_img, outputs=[decoded, prob])
    auto_classifier.summary()
    return auto_classifier
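A hypothetical usage sketch follows; the cube size, losses, and loss weights are assumptions, not taken from the aitom project:

net = auto_classifier_model(img_shape=(24, 24, 24), num_of_class=2)
net.compile(optimizer='adam',
            loss=['mse', 'categorical_crossentropy'],   # reconstruction output + classification output
            loss_weights=[1.0, 1.0])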
Example #15
Source File: step2_train_nodule_detector.py From kaggle_ndsb2017 with MIT License | 4 votes |
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.3)(x)

    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.4)(x)

    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1),)(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.5)(x)

    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)

    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
                  loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error},
                  metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})

    if features:
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)

    return model
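get_net relies on module-level settings and metric imports defined elsewhere in step2_train_nodule_detector.py; plausible placeholders (assumptions, not verified against the original file) are:

from keras.optimizers import SGD
from keras.metrics import binary_accuracy, binary_crossentropy, mean_absolute_error
CUBE_SIZE = 32        # assumed cube edge length
USE_DROPOUT = False   # assumed flag
LEARN_RATE = 0.001    # assumed learning rate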
Example #16
Source File: network.py From cocktail-party with MIT License | 4 votes |
def build(video_shape, audio_spectrogram_size):
    model = Sequential()

    model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero1', input_shape=video_shape))
    model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max1'))
    model.add(Dropout(0.25))

    model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero2'))
    model.add(Convolution3D(64, (3, 5, 5), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max2'))
    model.add(Dropout(0.25))

    model.add(ZeroPadding3D(padding=(1, 1, 1), name='zero3'))
    model.add(Convolution3D(128, (3, 3, 3), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv3'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max3'))
    model.add(Dropout(0.25))

    model.add(TimeDistributed(Flatten(), name='time'))

    model.add(Dense(1024, kernel_initializer='he_normal', name='dense1'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.25))

    model.add(Dense(1024, kernel_initializer='he_normal', name='dense2'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.25))

    model.add(Flatten())

    model.add(Dense(2048, kernel_initializer='he_normal', name='dense3'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.25))

    model.add(Dense(2048, kernel_initializer='he_normal', name='dense4'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.25))

    model.add(Dense(audio_spectrogram_size, name='output'))

    model.summary()

    return VideoToSpeechNet(model)