Python keras.models.Graph() Examples

The following are 13 code examples of keras.models.Graph(), collected from open-source projects; the source file, project, and license are noted above each example. Note that Graph is the container API from early Keras releases (0.x/1.0) and was later removed in favor of the functional Model API, so these snippets target the old Keras API rather than modern Keras.
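Before the individual examples, here is a minimal sketch of the workflow they all share, written against the old Graph API (layer sizes and array names are illustrative, not taken from any example below): build the container, declare named inputs, wire nodes together by name, declare named outputs, then compile with one loss per output.

from keras.models import Graph
from keras.layers.core import Dense

graph = Graph()
graph.add_input(name='input1', ndim=2)                         # a named 2D input
graph.add_node(Dense(32, 16), name='dense1', input='input1')   # Dense(input_dim, output_dim) in Keras 0.x
graph.add_output(name='output1', input='dense1')               # expose a node as a named output
graph.compile('rmsprop', {'output1': 'mse'})                   # losses are keyed by output name

# training and prediction use dicts keyed by input/output name
# (X_train and y_train stand in for your own arrays)
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_train})['output1']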
Example #1
Source File: test_graph_model.py    From CAPTCHA-breaking with MIT License
def test_1o_1i(self):
        print('test a non-sequential graph with 1 input and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')

        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test})
        print(loss)
        assert(loss < 2.5) 
Example #2
Source File: test_graph_model.py    From CAPTCHA-breaking with MIT License
def test_1o_1i_2(self):
        print('test a more complex non-sequential graph with 1 input and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2-0', input='input1')
        graph.add_node(Activation('relu'), name='dense2', input='dense2-0')

        graph.add_node(Dense(4, 16), name='dense3', input='dense2')
        graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')

        graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_train})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test})
        print(loss)
        assert(loss < 2.5)
        graph.get_config(verbose=1) 
Example #3
Source File: test_graph_model.py    From CAPTCHA-breaking with MIT License
def test_1o_2i(self):
        print('test a non-sequential graph with 2 inputs and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_input(name='input2', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input2')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')

        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test, 'input2': X2_test})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        print(loss)
        assert(loss < 3.0)
        graph.get_config(verbose=1) 
Example #4
Source File: test_graph_model.py    From CAPTCHA-breaking with MIT License
def test_recursive(self):
        print('test layer-like API')

        graph = containers.Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')
        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

        seq = Sequential()
        seq.add(Dense(32, 32, name='first_seq_dense'))
        seq.add(graph)
        seq.add(Dense(4, 4, name='last_seq_dense'))

        seq.compile('rmsprop', 'mse')

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert(loss < 2.5)

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1) 
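The pattern exercised here: a Graph container implements the layer interface, so the whole graph (32-dim input, 4-dim output after the sum merge) can be dropped into a Sequential model between two ordinary Dense layers.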
Example #5
Source File: base.py    From keras-rtst with MIT License
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5):
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    add_conv_block(net, 'in1', 'in0', num_res_filters // 2, 3, subsample=(2, 2), activation=activation)
    add_conv_block(net, 'in2', 'in1', num_res_filters, 3, subsample=(2, 2), activation=activation)
    last_block_name = 'in2'
    for res_i in range(num_res_blocks):
        block_name = 'res_{}'.format(res_i)
        add_conv_block(net, block_name + '_in0', last_block_name, num_res_filters, 3, activation=activation)
        add_conv_block(net, block_name + '_in1', block_name + '_in0', num_res_filters, 3, activation='linear')
        net.add_node(Activation(res_out_activation), block_name, merge_mode='sum', inputs=[block_name + '_in1', last_block_name])
        last_block_name = block_name
    # theano doesn't seem to support fractionally-strided convolutions at the moment
    net.add_node(UpSampling2D(), 'out_up0', last_block_name)
    add_conv_block(net, 'out_0', 'out_up0', num_res_filters // 2, 3, activation=activation)
    net.add_node(UpSampling2D(), 'out_up1', 'out_0')
    add_conv_block(net, 'out_1', 'out_up1', num_res_filters // 4, 3, activation=activation)
    add_conv_block(net, 'out_2', 'out_1', 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out_2', create_output=True)
    return net 
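This function (like Example #11 below) calls add_conv_block, a helper defined elsewhere in keras-rtst that is not part of this excerpt. A plausible sketch, consistent with how it is called here but with the signature, border mode, and batch-norm placement assumed rather than copied from the project:

def add_conv_block(net, name, input_name, num_filters, filter_size,
                   subsample=(1, 1), activation='relu'):
    # convolution -> batch normalization -> activation, each as a named node
    net.add_node(Convolution2D(num_filters, filter_size, filter_size,
                               subsample=subsample, border_mode='same'),
                 name + '_conv', input_name)
    net.add_node(BatchNormalization(mode=0, axis=1), name + '_bn', name + '_conv')
    net.add_node(Activation(activation), name, name + '_bn')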
Example #6
Source File: model_visualization.py    From visual_turing_test-tutorial with MIT License
def model_picture(model, to_file='local/model.png'):

    graph = pydot.Dot(graph_type='digraph')
    if isinstance(model,Sequential):
        previous_node = None
        written_nodes = []
        n = 1
        for node in model.get_config()['layers']:
            # append a number so layers that share a name can be differentiated
            if (node['name'] + str(n)) in written_nodes:
                n += 1
            current_node = pydot.Node(node['name'] + str(n))
            written_nodes.append(node['name'] + str(n))
            graph.add_node(current_node)
            if previous_node:
                graph.add_edge(pydot.Edge(previous_node, current_node))
            previous_node = current_node
        graph.write_png(to_file)

    elif isinstance(model,Graph):
        # don't need to append number for names since all nodes labeled
        for input_node in model.input_config:
            graph.add_node(pydot.Node(input_node['name']))

        # intermediate and output nodes have input defined
        for layer_config in [model.node_config, model.output_config]:
            for node in layer_config:
                graph.add_node(pydot.Node(node['name']))
                # possible to have multiple 'inputs' vs 1 'input'
                if node['inputs']:
                    for e in node['inputs']:
                        graph.add_edge(pydot.Edge(e, node['name']))
                else:
                    graph.add_edge(pydot.Edge(node['input'], node['name']))

        graph.write_png(to_file) 
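A usage sketch (assuming pydot and Graphviz are installed, and that `model` is any Sequential or Graph instance built as in the other examples):

model_picture(model, to_file='local/model.png')  # renders the layer DAG to a PNG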
Example #7
Source File: test_loss_weighting.py    From CAPTCHA-breaking with MIT License
def create_graph_model():
    model = Graph()
    model.add_input(name='input')
    model.add_node(Dense(784, 50, activation='relu'), name='d1', input='input')
    model.add_node(Dense(50, 10, activation='softmax'), name='d2', input='d1')
    model.add_output(name='output', input='d2')
    return model 
Example #8
Source File: test_graph_model.py    From CAPTCHA-breaking with MIT License
def test_2o_1i_weights(self):
        print('test a non-sequential graph with 1 input and 2 outputs')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 1), name='dense3', input='dense1')

        graph.add_output(name='output1', input='dense2')
        graph.add_output(name='output2', input='dense3')
        graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
        assert(len(out) == 2)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        print(loss)
        assert(loss < 4.)

        print('test weight saving')
        graph.save_weights('temp.h5', overwrite=True)
        graph = Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 1), name='dense3', input='dense1')
        graph.add_output(name='output1', input='dense2')
        graph.add_output(name='output2', input='dense3')
        graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
        graph.load_weights('temp.h5')
        nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        print(nloss)
        assert(loss == nloss) 
Example #9
Source File: model.py    From DeepSequenceClassification with GNU General Public License v2.0
def gen_model_brnn(vocab_size=100, embedding_size=128, maxlen=100, output_size=6, hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %\
            (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in range(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True), name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True), name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i+1))
    model.add_node(TimeDistributedDense(output_size, activation="softmax"), name="tdd", input=dropout_name)
    logger.info("Added TimeDistributedDense node")
    model.add_output(name="output", input="tdd")
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {"output": 'categorical_crossentropy'})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model 
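A hedged usage sketch (array names and sizes are illustrative: X holds padded integer sequences of shape (n_samples, maxlen), Y holds one-hot tag sequences of shape (n_samples, maxlen, output_size); note that `optimizer` is a module-level setting in the original project, not a function argument):

model = gen_model_brnn(vocab_size=5000, embedding_size=128, maxlen=100, output_size=6)
model.fit({'input': X, 'output': Y}, batch_size=32, nb_epoch=5)
probs = model.predict({'input': X})['output']  # per-timestep class probabilities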
Example #10
Source File: model.py    From DeepSequenceClassification with GNU General Public License v2.0
def gen_model_brnn_multitask(vocab_size=100, embedding_size=128, maxlen=100, output_size=[6, 96], hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %\
            (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen, mask_zero=True), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in range(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True), name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True), name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i+1))
    output_names = []
    for i, output_task_size in enumerate(output_size):
        tdd_name, output_name = "tdd_%s" % i, "output_%s" % i
        model.add_node(TimeDistributedDense(output_task_size, activation="softmax"), name=tdd_name, input=dropout_name)
        logger.info("Added TimeDistributedDense node %s with output_size %s" % (i, output_task_size))
        model.add_output(name=output_name, input=tdd_name)
        output_names.append(output_name)
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {k: 'categorical_crossentropy' for k in output_names})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model, output_names 
Example #11
Source File: girthy.py    From keras-rtst with MIT License
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5, depth=3):
    '''Adds a series of residual blocks at each resolution scale, rather than just
    the minimum one.
    '''
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    last_name = 'in0'
    # scale down input to max depth with a series of strided convolutions
    for scale_i in range(depth):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        scale_name = 'down_{}'.format(scale_i)
        add_conv_block(net, scale_name, last_name, num_scale_filters, 3, subsample=(2, 2), activation=activation)
        last_name = scale_name
    # add a series of residual blocks at each scale, from smallest to largest
    for scale_i in reversed(range(depth)):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        last_scale_name = last_name
        for res_i in range(num_res_blocks):
            block_name = 'res_{}_{}'.format(scale_i, res_i)
            add_conv_block(net, block_name + '_b0', last_name, num_res_filters, 3, activation=activation)
            add_conv_block(net, block_name + '_b1', block_name + '_b0', num_res_filters, 1, activation='linear')
            if last_name == last_scale_name:
                # transform the residual connection to the same number of filters
                add_conv_block(net, block_name + '_res', last_name, num_res_filters, 1, activation='linear')
            else:
                # no transform needed when the last node was part of the current residual block
                net.add_node(Layer(), block_name + '_res', last_name)
            net.add_node(Activation(res_out_activation), block_name, merge_mode='sum', inputs=[block_name + '_b1', block_name + '_res'])
            last_name = block_name
        # theano doesn't seem to support fractionally-strided convolutions at the moment
        up_name = 'up_{}'.format(scale_i)
        net.add_node(UpSampling2D(), up_name, last_name)
        last_name = up_name
        last_scale_name = up_name
    # final output
    add_conv_block(net, 'out', last_name, 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out', create_output=True)
    return net 
Example #12
Source File: Segmentation_Models.py    From brain_segmentation with MIT License
def comp_two_path(self):
        '''
        compiles two-path model, takes in a 4x33x33 patch and assesses global and local paths, then merges the results.
        '''
        print('Compiling two-path model...')
        model = Graph()
        model.add_input(name='input', input_shape=(self.n_chan, 33, 33))

        # local pathway, first convolution/pooling
        model.add_node(Convolution2D(64, 7, 7, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c1', input='input')
        model.add_node(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'), name='local_p1', input='local_c1')

        # local pathway, second convolution/pooling
        model.add_node(Dropout(0.5), name='drop_lp1', input='local_p1')
        model.add_node(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c2', input='drop_lp1')
        model.add_node(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'), name='local_p2', input='local_c2')

        # global pathway
        model.add_node(Convolution2D(160, 13, 13, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='global', input='input')

        # merge local and global pathways
        model.add_node(Dropout(0.5), name='drop_lp2', input='local_p2')
        model.add_node(Dropout(0.5), name='drop_g', input='global')
        model.add_node(Convolution2D(5, 21, 21, border_mode='valid', activation='relu',  W_regularizer=l1l2(l1=0.01, l2=0.01)), name='merge', inputs=['drop_lp2', 'drop_g'], merge_mode='concat', concat_axis=1)

        # Flatten output of 5x1x1 to 1x5, perform softmax
        model.add_node(Flatten(), name='flatten', input='merge')
        model.add_node(Dense(5, activation='softmax'), name='dense_output', input='flatten')
        model.add_output(name='output', input='dense_output')

        sgd = SGD(lr=0.005, decay=0.1, momentum=0.9)
        model.compile(sgd, loss={'output': 'categorical_crossentropy'})  # use the configured SGD instance so the custom lr/decay/momentum take effect
        print('Done.')
        return model 
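Shape check for the merge above: the local path reduces the 33x33 patch to 21x21 (7x7 conv -> 27x27, 4x4 pooling at stride 1 -> 24x24, 3x3 conv -> 22x22, 2x2 pooling at stride 1 -> 21x21), while the single 13x13 global convolution also yields 21x21, so the two paths can be concatenated along the channel axis; the final 21x21 'valid' convolution then collapses each merged map to 5x1x1 ahead of the flatten and softmax.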
Example #13
Source File: embedding_transformer.py    From socialsent with Apache License 2.0
def get_model(inputdim, outputdim, regularization_strength=0.01, lr=0.000, cosine=False, **kwargs):
    transformation = Dense(inputdim, init='identity',
                           W_constraint=Orthogonal())

    model = Graph()
    model.add_input(name='embeddings1', input_shape=(inputdim,))
    model.add_input(name='embeddings2', input_shape=(inputdim,))
    model.add_shared_node(transformation, name='transformation',
                          inputs=['embeddings1', 'embeddings2'],
                          outputs=['transformed1', 'transformed2'])
    model.add_node(Lambda(lambda x: x[:, :outputdim]), input='transformed1', name='projected1')
    model.add_node(Lambda(lambda x: -x[:, :outputdim]), input='transformed2', name='negprojected2')

    if cosine:
        model.add_node(Lambda(lambda x:  x / K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='normalized1', input='projected1')
        model.add_node(Lambda(lambda x:  x / K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='negnormalized2', input='negprojected2')
        model.add_node(Lambda(lambda x: K.reshape(K.sum(x, axis=1), (x.shape[0], 1))),
                       name='distances', inputs=['normalized1', 'negnormalized2'], merge_mode='mul')
    else:
        model.add_node(Lambda(lambda x: K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='distances', inputs=['projected1', 'negprojected2'], merge_mode='sum')

    model.add_output(name='y', input='distances')
    model.compile(loss={'y': lambda y, d: K.mean(y * d)}, optimizer=SimpleSGD())
    return model
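A note on the design, as far as the excerpt shows: the shared `transformation` layer is applied to both embedding batches via add_shared_node, and the second projection is negated so that the 'sum' merge computes a difference, making 'distances' a per-pair Euclidean distance (or, in the cosine branch, a negated cosine similarity via the 'mul' merge and sum). The loss K.mean(y * d) scales each distance by its label, so minimizing it pulls y = +1 pairs together and pushes y = -1 pairs apart. Orthogonal and SimpleSGD appear to be project-level helpers defined elsewhere in socialsent.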