Python tflearn.layers.conv.global_avg_pool() Examples

The following are 11 code examples of tflearn.layers.conv.global_avg_pool(), drawn from open-source projects. You may also want to check out all available functions/classes of the module tflearn.layers.conv, or try the search function.
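Before the project examples, here is a minimal self-contained sketch of the call itself (the input size and layer width below are illustrative, not taken from any of the listed projects). global_avg_pool() takes a 4-D [batch, height, width, channels] tensor and averages each feature map down to a single value, returning a 2-D [batch, channels] tensor:

import tflearn
from tflearn.layers.conv import conv_2d, global_avg_pool

# A 32x32 RGB input, one conv layer, then global average pooling.
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = conv_2d(net, 16, 3, activation='relu')  # -> [None, 32, 32, 16]
net = global_avg_pool(net)                    # -> [None, 16], one average per channel
print(net.get_shape().as_list())              # [None, 16]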
Example #1
Source File: arch.py    From polar-transformer-networks with MIT License
def finalize_get_model(net, flags):
    # dup() is a helper defined elsewhere in this project; global average pooling
    # collapses the final conv feature map to one value per channel.
    net['gap'], curr = dup(global_avg_pool(net['conv_final'], name='gap'))

    net['final'] = regression(curr,
                              optimizer='adam',
                              learning_rate=flags.lr,
                              batch_size=flags.bs,
                              loss='softmax_categorical_crossentropy',
                              name='target',
                              n_classes=flags.nc,
                              shuffle_batches=True)

    model = tflearn.DNN(net['final'],
                        tensorboard_verbose=0,
                        tensorboard_dir=flags.logdir,
                        best_checkpoint_path=os.path.join(flags.logdir,
                                                          flags.run_id,
                                                          flags.run_id),
                        best_val_accuracy=flags.acc_save)

    model.net_dict = net
    model.flags = flags

    return model 
Example #2
Source File: ResNeXt.py    From ResNeXt-Tensorflow with MIT License
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling') 
Example #3
Source File: Densenet_MNIST.py    From Densenet-Tensorflow with MIT License
def Global_Average_Pooling(x, stride=1):
    """
    Global average pooling without tflearn:

    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    # The stride value does not matter for a global pool.
    """

    return global_avg_pool(x, name='Global_avg_pooling')
    # Note: tflearn may additionally require the h5py and curses packages.
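The commented-out docstring above sketches a tflearn-free variant via tf.layers.average_pooling2d; an even simpler equivalent (my own sketch, not code from the Densenet-Tensorflow repository) is a mean over the spatial axes:

import tensorflow as tf

def global_average_pooling(x):
    # [batch, H, W, C] -> [batch, C]: average each channel over height and width.
    return tf.reduce_mean(x, axis=[1, 2])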
Example #4
Source File: Densenet_Cifar10.py    From Densenet-Tensorflow with MIT License
def Global_Average_Pooling(x, stride=1):
    """
    Global average pooling without tflearn:

    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    # The stride value does not matter for a global pool.
    """

    return global_avg_pool(x, name='Global_avg_pooling')
    # Note: tflearn may additionally require the h5py and curses packages.
Example #5
Source File: denseNet.py    From cnn_lstm_ctc_ocr_for_ICPR with GNU General Public License v3.0
def Global_Average_Pooling(x, stride=1):
    """
    Global average pooling without tflearn:

    width = np.shape(x)[1]
    height = np.shape(x)[2]
    pool_size = [width, height]
    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride)
    # The stride value does not matter for a global pool.
    """
    return global_avg_pool(x, name='Global_avg_pooling')
    # Note: tflearn may additionally require the h5py and curses packages.
Example #6
Source File: SE_Inception_resnet_v2.py    From SENet-Tensorflow with MIT License
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling') 
Example #7
Source File: SE_Inception_v4.py    From SENet-Tensorflow with MIT License
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling') 
Example #8
Source File: SE_ResNeXt.py    From SENet-Tensorflow with MIT License
def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling') 
Example #9
Source File: network.py    From PRIDNet with MIT License
def squeeze_excitation_layer(input_x, out_dim, middle):
    # Squeeze: global average pooling gives one descriptor per channel.
    squeeze = global_avg_pool(input_x)
    # Excitation: bottleneck MLP (out_dim -> middle -> out_dim) with sigmoid gates.
    excitation = tf.layers.dense(squeeze, use_bias=True, units=middle)
    excitation = tf.nn.relu(excitation)
    excitation = tf.layers.dense(excitation, use_bias=True, units=out_dim)
    excitation = tf.nn.sigmoid(excitation)
    excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
    # Scale: reweight each input channel by its gate.
    scale = input_x * excitation
    return scale
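A hedged usage sketch for the block above (the placeholder shape and the reduction width middle=16 are illustrative assumptions, not values from PRIDNet):

import tensorflow as tf

# Hypothetical feature map: a batch of 32x32 maps with out_dim=64 channels.
feature_map = tf.placeholder(tf.float32, [None, 32, 32, 64])
recalibrated = squeeze_excitation_layer(feature_map, out_dim=64, middle=16)
# Same shape as feature_map; each channel is scaled by a learned gate in (0, 1).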
Example #10
Source File: network.py    From PRIDNet with MIT License
def selective_kernel_layer(sk_conv1, sk_conv2, sk_conv3, middle, out_dim):
    # Fuse the three branch outputs, then squeeze to one descriptor per channel.
    sum_u = sk_conv1 + sk_conv2 + sk_conv3
    squeeze = global_avg_pool(sum_u)
    squeeze = tf.reshape(squeeze, [-1, 1, 1, out_dim])
    # Bottleneck, then one attention vector per branch.
    z = tf.layers.dense(squeeze, use_bias=True, units=middle)
    z = tf.nn.relu(z)
    a1 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a2 = tf.layers.dense(z, use_bias=True, units=out_dim)
    a3 = tf.layers.dense(z, use_bias=True, units=out_dim)

    # Softmax across the three branches (axis 1) so that, per channel,
    # the branch weights sum to one.
    before_softmax = tf.concat([a1, a2, a3], 1)
    after_softmax = tf.nn.softmax(before_softmax, axis=1)
    a1 = after_softmax[:, 0, :, :]
    a1 = tf.reshape(a1, [-1, 1, 1, out_dim])
    a2 = after_softmax[:, 1, :, :]
    a2 = tf.reshape(a2, [-1, 1, 1, out_dim])
    a3 = after_softmax[:, 2, :, :]
    a3 = tf.reshape(a3, [-1, 1, 1, out_dim])

    select_1 = sk_conv1 * a1
    select_2 = sk_conv2 * a2
    select_3 = sk_conv3 * a3

    out = select_1 + select_2 + select_3

    return out 
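A hedged usage sketch (the three parallel branches below, with 3x3, 5x5, and 7x7 kernels, are my own illustrative assumption in the spirit of selective-kernel networks, not code from PRIDNet):

import tensorflow as tf

# Hypothetical parallel branches over the same input, all producing out_dim=64 channels.
inputs = tf.placeholder(tf.float32, [None, 32, 32, 64])
branch1 = tf.layers.conv2d(inputs, 64, 3, padding='same', activation=tf.nn.relu)
branch2 = tf.layers.conv2d(inputs, 64, 5, padding='same', activation=tf.nn.relu)
branch3 = tf.layers.conv2d(inputs, 64, 7, padding='same', activation=tf.nn.relu)
fused = selective_kernel_layer(branch1, branch2, branch3, middle=16, out_dim=64)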
Example #11
Source File: inceptionVxOnFire.py    From fire-detection-cnn with MIT License
def construct_inceptionv4onfire(x, y, training=True, enable_batch_norm=True):

    network = input_data(shape=[None, y, x, 3])

    # stem of InceptionV4

    conv1_3_3 = conv_2d(network, 32, 3, strides=2, activation='relu', name='conv1_3_3_s2', padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3, 32, 3, activation='relu', name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3, 64, 3, activation='relu', name='conv3_3_3')
    b_conv_1_pool = max_pool_2d(conv3_3_3, kernel_size=3, strides=2, padding='valid', name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3, 96, 3, strides=2, padding='valid', activation='relu', name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv, b_conv_1_pool], mode='concat', axis=3)

    b_conv4_1_1 = conv_2d(b_conv_1, 64, 1, activation='relu', name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1, 96, 3, padding='valid', activation='relu', name='conv5_3_3')

    b_conv4_1_1_reduce = conv_2d(b_conv_1, 64, 1, activation='relu', name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce, 64, [1, 7], activation='relu', name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7, 64, [7, 1], activation='relu', name='b_conv4_7_1')
    b_conv4_3_3_v = conv_2d(b_conv4_7_1, 96, 3, padding='valid', name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3], mode='concat', axis=3)

    b_conv5_3_3 = conv_2d(b_conv_4, 192, 3, padding='valid', activation='relu', name='b_conv5_3_3', strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4, kernel_size=3, padding='valid', strides=2, name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3, b_pool5_3_3], mode='concat', axis=3)
    net = b_conv_5

    # InceptionV4 modules

    net = inception_block_a(net)
    net = inception_block_b(net)
    net = inception_block_c(net)

    # global average pooling in place of flatten + fully connected feature layers
    pool5_7_7 = global_avg_pool(net)
    if training:
        pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')

    if training:
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network = loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)

    return model

################################################################################
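A hypothetical call to the constructor above (the 224x224 input size and the checkpoint path are assumptions, not taken from fire-detection-cnn):

# Build the inference-mode model and classify a single RGB frame.
model = construct_inceptionv4onfire(224, 224, training=False)
# model.load('path/to/checkpoint')      # supply your own trained weights
# prediction = model.predict([frame])   # frame: a 224x224x3 image array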