Python keras.layers.GlobalMaxPooling1D() Examples

The following are 30 code examples of keras.layers.GlobalMaxPooling1D(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all of the available functions/classes of the module keras.layers, or try the search function.
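Before the examples, a minimal sketch of what the layer does (this snippet is illustrative and not taken from any of the projects below): GlobalMaxPooling1D reduces a (batch, steps, features) tensor to (batch, features) by taking the maximum over the time axis.

from keras.layers import GlobalMaxPooling1D, Input
from keras.models import Model
import numpy as np

# (batch, steps, features) -> (batch, features): maximum over the time axis
inp = Input(shape=(10, 8))         # 10 timesteps, 8 features per step
out = GlobalMaxPooling1D()(inp)    # output shape: (None, 8)
model = Model(inp, out)

x = np.random.rand(2, 10, 8).astype('float32')
print(model.predict(x).shape)      # (2, 8)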
Example #1
Source File: convNet.py    From semeval2017-scienceie with Apache License 2.0
def build_cnn(input_shape, output_dim,nb_filter):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,border_mode="valid",activation="relu",subsample_length=1,input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return clf

# just one filter 
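Note that Example #1 (and Example #15 below) uses Keras 1 argument names (nb_filter, filter_length, border_mode, subsample_length, Dense(output_dim=...)), which raise TypeErrors under Keras 2. A sketch of the equivalent model with Keras 2 argument names (an assumption on our part, not code from the semeval2017-scienceie project):

from keras.models import Sequential
from keras.layers import Conv1D, GlobalMaxPooling1D, Dense, Dropout, Activation

def build_cnn_keras2(input_shape, output_dim, nb_filter):
    # Same architecture as build_cnn above, written for the Keras 2 API.
    clf = Sequential()
    clf.add(Conv1D(filters=nb_filter, kernel_size=4, padding='valid',
                   activation='relu', strides=1, input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation('tanh'))
    clf.add(Dense(units=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad', loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf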
Example #2
Source File: MCRecRecommenderWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umtmum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'umtmum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umtmum_input)
    output = conv_umtmum(path_input)
    output = GlobalMaxPooling1D()(output)
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umtmum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtmum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])

    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umtmum')
    output = GlobalMaxPooling1D()(output)
    return output 
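The slice passed to Lambda here (and in the other MCRec path encoders below) is not Python's built-in slice but a helper defined elsewhere in the same file. It presumably selects one path from the 4-D path input, along these lines:

def slice(x, index):
    # Assumed helper: select the index-th path from a
    # (batch, path_num, timestamps, length) tensor, yielding
    # (batch, timestamps, length). The name shadows the built-in slice().
    return x[:, index, :, :]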
Example #3
Source File: keras_bert_classify_text_cnn.py    From nlp_xiaojiang with MIT License
def build_model_text_cnn(self):
        #########    text-cnn    #########
        # bert embedding
        bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
        # text cnn
        bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
        concat_out = []
        for index, filter_size in enumerate(self.filters):
            x = Conv1D(name='TextCNN_Conv1D_{}'.format(index), filters=int(self.embedding_dim/2), kernel_size=self.filters[index], padding='valid', kernel_initializer='normal', activation='relu')(bert_output_emmbed)
            x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
            concat_out.append(x)
        x = Concatenate(axis=1)(concat_out)
        x = Dropout(self.keep_prob)(x)

        # finally, the softmax layer
        dense_layer = Dense(self.label, activation=self.activation)(x)
        output_layers = [dense_layer]
        self.model = Model(bert_inputs, output_layers) 
Example #4
Source File: MCRecRecommenderWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_umtm_embedding(umtm_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umtm = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'umtm_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umtm_input)
    output = GlobalMaxPooling1D()(conv_umtm(path_input))
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umtm_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtm(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])

    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umtm')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #5
Source File: MCRecRecommenderWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'umum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])


    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umum')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #6
Source File: MCRecRecommenderWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'uuum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])


    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'uuum')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #7
Source File: MCRec.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umtmum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'umtmum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umtmum_input)
    output = conv_umtmum(path_input)
    output = GlobalMaxPooling1D()(output)
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umtmum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtmum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umtmum')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #8
Source File: MCRec.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'umum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    
    
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umum')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #9
Source File: MCRec.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent, path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters = 128,
                       kernel_size = 4,
                       activation = 'relu',
                       kernel_regularizer = l2(0.0),
                       kernel_initializer = 'glorot_uniform',
                       padding = 'valid',
                       strides = 1,
                       name = 'uuum_conv')

    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)

    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index':i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    
    
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'uuum')
    output = GlobalMaxPooling1D()(output)
    return output 
Example #10
Source File: models.py    From WeSHClass with Apache License 2.0
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False,
                     vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y) 
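Unlike the WeSTClass variant in Example #12 below, this version expects an already-built Input tensor x rather than creating its own. A hypothetical call (all values illustrative):

from keras.layers import Input
import numpy as np

x = Input(shape=(500,), name='input')            # 500-token sequences
embedding_matrix = np.random.rand(20000, 100)    # (vocab_sz, word_embedding_dim)
model = ConvolutionLayer(x, input_shape=500, n_classes=4,
                         vocab_sz=20000, embedding_matrix=embedding_matrix)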
Example #11
Source File: graph.py    From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
        """
            构建神经网络
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        x = self.word_embedding.output
        x = SpatialDropout1D(self.dropout_spatial)(x)
        x = AttentionSelf(self.word_embedding.embed_size)(x)
        x = GlobalMaxPooling1D()(x)
        x = Dropout(self.dropout)(x)
        # x = Flatten()(x)
        # finally, the softmax layer
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output = [dense_layer]
        self.model = Model(self.word_embedding.input, output)
        self.model.summary(120) 
Example #12
Source File: model.py    From WeSTClass with Apache License 2.0
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding", 
                    weights=[embedding_matrix], trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier') 
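This variant is self-contained, so a hypothetical end-to-end call only needs the hyperparameters (values illustrative):

import numpy as np

embedding_matrix = np.random.rand(20000, 100)    # (vocab_sz, word_embedding_dim)
clf = ConvolutionLayer(input_shape=500, n_classes=4,
                       vocab_sz=20000, embedding_matrix=embedding_matrix)
clf.compile(optimizer='adam', loss='categorical_crossentropy',
            metrics=['accuracy'])
clf.summary()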
Example #13
Source File: newsgroup_classifier_pretrained_word_embeddings.py    From Deep-Learning-Quick-Reference with MIT License
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):

    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                weights=[embedding_matrix],
                                input_length=sequence_length,
                                trainable=False,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model 
Example #14
Source File: newsgroup_classifier_word_embeddings.py    From Deep-Learning-Quick-Reference with MIT License
def build_model(vocab_size, embedding_dim, sequence_length):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                input_length=sequence_length,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model 
Example #15
Source File: convNet.py    From semeval2017-scienceie with Apache License 2.0
def build_cnn_char(input_dim, output_dim,nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,border_mode="valid",activation="relu",subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return clf

# just one filter 
Example #16
Source File: model.py    From kopt with MIT License
def build_model(train_data, max_features=5000, maxlen=400,
                batch_size=32, embedding_dims=50,
                filters=250, kernel_size=3, hidden_dims=250):
    print('Build model...')
    model = Sequential()

    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features,
                        embedding_dims,
                        input_length=maxlen))
    model.add(Dropout(0.2))

    # we add a Convolution1D, which will learn filters
    # word group filters of size filter_length:
    model.add(Conv1D(filters,
                     kernel_size,
                     padding='valid',
                     activation='relu',
                     strides=1))
    # we use max pooling:
    model.add(GlobalMaxPooling1D())

    # We add a vanilla hidden layer:
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
    # model.fit(x_train, y_train,
    #           batch_size=batch_size,
    #           epochs=epochs,
    #           validation_data=(x_test, y_test)) 
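The commented-out fit call suggests the intended workflow; a sketch against the standard Keras IMDB dataset (our assumption, not code from the kopt project):

from keras.datasets import imdb
from keras.preprocessing import sequence

max_features, maxlen = 5000, 400
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)   # pad/truncate to 400
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

model = build_model((x_train, y_train), max_features=max_features, maxlen=maxlen)
model.fit(x_train, y_train, batch_size=32, epochs=2,
          validation_data=(x_test, y_test))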
Example #17
Source File: DeepConvDTI.py    From DeepConv-DTI with GNU General Public License v3.0
def PLayer(self, size, filters, activation, initializer, regularizer_param):
        def f(input):
            # model_p = Convolution1D(filters=filters, kernel_size=size, padding='valid', activity_regularizer=l2(regularizer_param), kernel_initializer=initializer, kernel_regularizer=l2(regularizer_param))(input)
            model_p = Convolution1D(filters=filters, kernel_size=size, padding='same', kernel_initializer=initializer, kernel_regularizer=l2(regularizer_param))(input)
            model_p = BatchNormalization()(model_p)
            model_p = Activation(activation)(model_p)
            return GlobalMaxPooling1D()(model_p)
        return f 
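PLayer returns a closure, so the whole conv-BN-activation-pool block can be applied like a layer. A hypothetical application inside the same class (shapes and values illustrative):

from keras.layers import Input

protein_input = Input(shape=(2500, 20))    # e.g. one-hot encoded protein
encoded = self.PLayer(size=8, filters=64, activation='relu',
                      initializer='glorot_normal',
                      regularizer_param=0.001)(protein_input)
# `encoded` has shape (batch, 64) after the global max pooling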
Example #18
Source File: graph_emb.py    From ccg2lambda with Apache License 2.0
def make_child_parent_branch(token_emb, max_nodes, max_bi_relations):
    node_indices = Input(
        shape=(max_nodes,),
        dtype='int32',
        name='node_inds')
    graph_node_embs = token_emb(node_indices)

    child_rel_outputs, child_rel_inputs = make_pair_branch(
        graph_node_embs,
        max_nodes,
        max_bi_relations,
        label='child')
    parent_rel_outputs, parent_rel_inputs = make_pair_branch(
        graph_node_embs,
        max_nodes,
        max_bi_relations,
        label='parent')

    x = Add(name='child_parent_add')(
        child_rel_outputs + parent_rel_outputs)
    # Integrate node embeddings into a single graph embedding.
    x = GlobalMaxPooling1D()(x)

    outputs = [x]
    inputs = [node_indices] + child_rel_inputs + parent_rel_inputs
    return outputs, inputs 
Example #19
Source File: model.py    From awesome-text-classification with MIT License
def build(self):
        sequence_input = Input(shape=(self.max_sequence_length,), dtype='int32')
        if self.weights is None:
            embedding = Embedding(
                self.vocab_size + 1,  # due to mask_zero
                self.embedding_dim,
                input_length=self.max_sequence_length,
            )(sequence_input)
        else:
            embedding = Embedding(
                self.weights.shape[0],  # due to mask_zero
                self.weights.shape[1],
                input_length=self.max_sequence_length,
                weights=[self.weights],
            )(sequence_input)

        convs = []
        for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
            conv = Conv1D(filters=num_filter,
                          kernel_size=filter_size,
                          activation='relu')(embedding)
            pool = GlobalMaxPooling1D()(conv)
            convs.append(pool)

        z = Concatenate()(convs)
        z = Dense(self.num_units)(z)
        z = Dropout(self.keep_prob)(z)
        z = Activation('relu')(z)
        pred = Dense(self.num_tags, activation='softmax')(z)
        model = Model(inputs=[sequence_input], outputs=[pred])

        return model 
Example #20
Source File: keras_mt_shared_cnn.py    From Benchmarks with MIT License
def init_export_network(num_classes,
                        in_seq_len,
                        vocab_size,
                        wv_space,
                        filter_sizes,
                        num_filters,
                        concat_dropout_prob,
                        emb_l2,
                        w_l2,
                        optimizer):


    # define network layers ----------------------------------------------------
    input_shape = tuple([in_seq_len])
    model_input = Input(shape=input_shape, name= "Input")
    # embedding lookup
    emb_lookup = Embedding(vocab_size,
                           wv_space,
                           input_length=in_seq_len,
                           name="embedding",
                           #embeddings_initializer=RandomUniform,
                           embeddings_regularizer=l2(emb_l2))(model_input)
    # convolutional layer and dropout
    conv_blocks = []
    for ith_filter,sz in enumerate(filter_sizes):
        conv = Convolution1D(filters=num_filters[ ith_filter ],
                             kernel_size=sz,
                             padding="same",
                             activation="relu",
                             strides=1,
                             # kernel_initializer='lecun_uniform',
                             name=str(ith_filter) + "_thfilter")(emb_lookup)
        conv_blocks.append(GlobalMaxPooling1D()(conv))
    concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    concat_drop = Dropout(concat_dropout_prob)(concat)

    # different dense layer per tasks
    FC_models = []
    for i in range(len(num_classes)):
        outlayer = Dense(num_classes[i], name= "Dense"+str(i), activation='softmax')( concat_drop )#, kernel_regularizer=l2(0.01))( concat_drop )
        FC_models.append(outlayer)


    # the multitask model
    model = Model(inputs=model_input, outputs=FC_models)
    model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["acc"])

    return model 
Example #21
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
        # Global Pooling 1D
        model = Sequential()
        model.add(GlobalMaxPooling1D(input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Global Pooling 2D
        model = Sequential()
        model.add(GlobalMaxPooling2D(input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 1D
        model = Sequential()
        model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Pooling 2D
        model = Sequential()
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 3D
        model = Sequential()
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                               input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 11)


# ********** Locally-connected Layers ********** 
Example #22
Source File: sequence_encoders.py    From keras-text with MIT License
def build_model(self, x):
        pooled_tensors = []
        for filter_size in self.filter_sizes:
            x_i = Conv1D(self.num_filters, filter_size, activation='elu', **self.conv_kwargs)(x)
            x_i = GlobalMaxPooling1D()(x_i)
            pooled_tensors.append(x_i)

        x = pooled_tensors[0] if len(self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
        return x 
Example #23
Source File: models.py    From Neural-Headline-Generator-CN with GNU General Public License v3.0
def c2r(dic_len,input_length,output_length,emb_dim=128,hidden=512,nb_filter=64,deepth=(1,1),stride=3):
    model = Sequential()
    model.add(Embedding(input_dim=dic_len, output_dim=emb_dim, input_length=input_length))
    for l in range(deepth[0]):
        model.add(Conv1D(nb_filter,3,activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(RepeatVector(output_length))
    for l in range(deepth[1]):  # decoder depth; the encoder loop above uses deepth[0]
        model.add(LSTM(hidden, return_sequences=True))
    model.add(TimeDistributed(Dense(units=dic_len, activation='softmax')))
    model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['acc'])
    return model 
Example #24
Source File: models.py    From SeqGAN with MIT License
def VariousConv1D(x, filter_sizes, num_filters, name_prefix=''):
    '''
    Layer wrapper function for various filter sizes Conv1Ds
    # Arguments:
        x: tensor, shape = (B, T, E)
        filter_sizes: list of int, list of each Conv1D filter sizes
        num_filters: list of int, list of each Conv1D num of filters
        name_prefix: str, layer name prefix
    # Returns:
        out: tensor, shape = (B, sum(num_filters))
    '''
    conv_outputs = []
    for filter_size, n_filter in zip(filter_sizes, num_filters):
        conv_name = '{}VariousConv1D/Conv1D/filter_size_{}'.format(name_prefix, filter_size)
        pooling_name = '{}VariousConv1D/MaxPooling/filter_size_{}'.format(name_prefix, filter_size)
        conv_out = Conv1D(n_filter, filter_size, name=conv_name)(x)   # (B, time_steps, n_filter)
        conv_out = GlobalMaxPooling1D(name=pooling_name)(conv_out) # (B, n_filter)
        conv_outputs.append(conv_out)
    concatenate_name = '{}VariousConv1D/Concatenate'.format(name_prefix)
    out = Concatenate(name=concatenate_name)(conv_outputs)
    return out 
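An illustrative call, following the documented shapes (names assumed):

from keras.layers import Input

x = Input(shape=(20, 64))                  # (T, E) = (20, 64)
out = VariousConv1D(x, filter_sizes=[2, 3, 4],
                    num_filters=[32, 32, 32],
                    name_prefix='discriminator_')
# out: tensor of shape (B, 96), i.e. (B, sum(num_filters))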
Example #25
Source File: models.py    From toxic_comments with MIT License
def cnn(embedding_matrix, char_matrix, num_classes, max_seq_len, max_ll3_seq_len,
        num_filters=64, l2_weight_decay=0.0001, dropout_val=0.5,
        dense_dim=32, add_sigmoid=True, train_embeds=False, gpus=0,
        n_cnn_layers=1, pool='max', add_embeds=False):
    if pool == 'max':
        Pooling = MaxPooling1D
        GlobalPooling = GlobalMaxPooling1D
    elif pool == 'avg':
        Pooling = AveragePooling1D
        GlobalPooling = GlobalAveragePooling1D
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    x = embeds
    for i in range(n_cnn_layers-1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
        x = Pooling(2)(x)
    x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = GlobalPooling()(x)
    if add_embeds:
        x1 = Conv1D(num_filters, 7, activation='relu', padding='same')(embeds)
        x1 = GlobalPooling()(x1)
        x = Concatenate()([x, x1])
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    x = Dense(dense_dim, activation='relu', kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        x = Dense(num_classes, activation='sigmoid')(x)
    model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model 
Example #26
Source File: graph.py    From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
        """
            构建神经网络
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the [CLS] token
        # # text cnn
        # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
        # concat_out = []
        # for index, filter_size in enumerate(self.filters):
        #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
        #                filters= self.filters_num, # int(K.int_shape(embedding_output)[-1]/self.len_max),
        #                strides=1,
        #                kernel_size=self.filters[index],
        #                padding='valid',
        #                kernel_initializer='normal',
        #                activation='relu')(bert_output_emmbed)
        #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        #     concat_out.append(x)
        # x = Concatenate(axis=1)(concat_out)
        # x = Dropout(self.dropout)(x)
        x = Flatten()(x)
        # finally, the softmax layer
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output_layers = [dense_layer]
        self.model = Model(self.word_embedding.input, output_layers)
        self.model.summary(120) 
Example #27
Source File: graph.py    From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
        """
            构建神经网络
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding = self.word_embedding.output

        def win_mean(x):
            res_list = []
            for i in range(self.len_max-self.n_win+1):
                x_mean = tf.reduce_mean(x[:, i:i + self.n_win, :], axis=1)
                x_mean_dims = tf.expand_dims(x_mean, axis=-1)
                res_list.append(x_mean_dims)
            res_list = tf.concat(res_list, axis=-1)
            gg = tf.reduce_max(res_list, axis=-1)
            return gg

        if self.encode_type=="HIERARCHICAL":
            x = Lambda(win_mean, output_shape=(self.embed_size, ))(embedding)
        elif self.encode_type=="MAX":
            x = GlobalMaxPooling1D()(embedding)
        elif self.encode_type=="AVG":
            x = GlobalAveragePooling1D()(embedding)
        elif self.encode_type == "CONCAT":
            x_max = GlobalMaxPooling1D()(embedding)
            x_avg = GlobalAveragePooling1D()(embedding)
            x = Concatenate()([x_max, x_avg])
        else:
            raise RuntimeError("encode_type must be 'MAX', 'AVG', 'CONCAT', 'HIERARCHICAL'")

        output = Dense(self.label, activation=self.activate_classify)(x)
        self.model = Model(inputs=self.word_embedding.input, outputs=output)
        self.model.summary(132) 
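The HIERARCHICAL branch computes a mean over every n_win-sized window and then an element-wise max across the windows. A NumPy sketch of the same computation for len_max=4, n_win=2, embed_size=1:

import numpy as np

x = np.array([[[1.0], [3.0], [2.0], [0.0]]])   # (batch=1, len_max=4, embed=1)
windows = [x[:, i:i + 2, :].mean(axis=1) for i in range(4 - 2 + 1)]
print(np.max(np.stack(windows, axis=-1), axis=-1))   # [[2.5]]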
Example #28
Source File: graph.py    From Keras-TextClassification with MIT License
def create_model(self, hyper_parameters):
        """
            构建神经网络
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding = self.word_embedding.output
        x = GlobalMaxPooling1D()(embedding)
        output = Dense(self.label, activation=self.activate_classify)(x)
        self.model = Model(inputs=self.word_embedding.input, outputs=output)
        self.model.summary(132) 
Example #29
Source File: graph.py    From Keras-TextClassification with MIT License 5 votes vote down vote up
def create_model(self, hyper_parameters):
        """
            构建神经网络
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        # x = embedding_output
        x = Lambda(lambda x: x[:, -2:-1, :])(embedding_output)  # take the CLS token (here the second-to-last position)
        # # text cnn
        # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
        # concat_out = []
        # for index, filter_size in enumerate(self.filters):
        #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
        #                filters= self.filters_num, # int(K.int_shape(embedding_output)[-1]/self.len_max),
        #                strides=1,
        #                kernel_size=self.filters[index],
        #                padding='valid',
        #                kernel_initializer='normal',
        #                activation='relu')(bert_output_emmbed)
        #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        #     concat_out.append(x)
        # x = Concatenate(axis=1)(concat_out)
        # x = Dropout(self.dropout)(x)
        x = Flatten()(x)
        # finally, the softmax layer
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output_layers = [dense_layer]
        self.model = Model(self.word_embedding.input, output_layers)
        self.model.summary(120) 
Example #30
Source File: keras_bert_classify_text_cnn.py    From nlp_xiaojiang with MIT License
def build_model_r_cnn(self):
        #########    RCNN    #########
        # bert embedding
        bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
        # rcnn
        bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
        if args.use_lstm:
            if args.use_cudnn_cell:
                layer_cell = CuDNNLSTM
            else:
                layer_cell = LSTM
        else:
            if args.use_cudnn_cell:
                layer_cell = CuDNNGRU
            else:
                layer_cell = GRU

        x = Bidirectional(layer_cell(units=args.units, return_sequences=args.return_sequences,
                                     kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                     recurrent_regularizer=regularizers.l2(args.l2)
                                     ))(bert_output_emmbed)
        x = Dropout(args.keep_prob)(x)
        x = Conv1D(filters=int(self.embedding_dim / 2), kernel_size=2, padding='valid', kernel_initializer='normal', activation='relu')(x)
        x = GlobalMaxPooling1D()(x)
        x = Dropout(args.keep_prob)(x)
        # finally, the softmax layer
        dense_layer = Dense(self.label, activation=self.activation)(x)
        output_layers = [dense_layer]
        self.model = Model(bert_inputs, output_layers)