Python tensorflow.keras.layers.LeakyReLU() Examples
The following are 23 code examples of tensorflow.keras.layers.LeakyReLU().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
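
Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical standalone use of the layer: a linear convolution followed by a separate LeakyReLU layer. It assumes the older tf.keras argument name alpha for the negative slope; newer Keras 3 releases rename this argument to negative_slope.

import tensorflow as tf
from tensorflow.keras import layers

# Minimal sketch: LeakyReLU as a standalone layer after an un-activated Conv2D.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(28, 28, 1)),
    layers.Conv2D(32, 3, padding="same"),   # no activation on the conv itself
    layers.LeakyReLU(alpha=0.2),            # slope 0.2 for negative inputs
    layers.Flatten(),
    layers.Dense(10, activation="softmax"),
])
model.summary()

Most of the examples below follow this same pattern: a linear convolution or dense layer followed by a LeakyReLU layer whose alpha (or negative_slope) parameter controls the slope for negative inputs.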

Example #1
Source File: darknet53.py From imgclsmob with MIT License | 6 votes |
def __init__(self, in_channels, out_channels, alpha, data_format="channels_last", **kwargs):
    super(DarkUnit, self).__init__(**kwargs)
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2

    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        activation=nn.LeakyReLU(alpha=alpha),
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(alpha=alpha),
        data_format=data_format,
        name="conv2")
Example #2
Source File: Darknet53.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 6 votes |
def residual_block(inputs, filters):
    """Residual Block
    This function defines a residual block built from two conv2d_unit
    operations (Conv2D + BN + LeakyReLU) and a skip connection.

    # Arguments
        inputs: Tensor, input tensor of the residual block.
        filters: Integer, the dimensionality of the output space
            of the first 1x1 convolution (the second uses 2 * filters).

    # Returns
        Output tensor.
    """
    x = conv2d_unit(inputs, filters, (1, 1))
    x = conv2d_unit(x, 2 * filters, (3, 3))
    x = add([inputs, x])
    x = Activation('linear')(x)
    return x
Example #3
Source File: Darknet53.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 6 votes |
def conv2d_unit(x, filters, kernels, strides=1):
    """Convolution Unit
    This function defines a 2D convolution operation with BN and LeakyReLU.

    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernels: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers, specifying the
            strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.

    # Returns
        Output tensor.
    """
    x = Conv2D(filters, kernels,
               padding='same',
               strides=strides,
               activation='linear',
               kernel_regularizer=l2(5e-4))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    return x
Example #4
Source File: cyclegan-7.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 6 votes |
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic encoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU
    """
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x
Example #5
Source File: layers.py From CartoonGan-tensorflow with Apache License 2.0 | 6 votes |
def __init__(self,
             filters=64,
             lrelu_alpha=0.2,
             pad_type="constant",
             norm_type="batch",
             **kwargs):
    super(StridedConv, self).__init__(name="StridedConv")
    self.model = tf.keras.models.Sequential()
    self.model.add(get_padding(pad_type, (1, 1)))
    self.model.add(Conv2D(filters, 3, strides=(2, 2)))
    self.model.add(LeakyReLU(lrelu_alpha))
    self.model.add(get_padding(pad_type, (1, 1)))
    self.model.add(Conv2D(filters * 2, 3))
    self.model.add(get_norm(norm_type))
    self.model.add(LeakyReLU(lrelu_alpha))
Example #6
Source File: mnist_cifar_models.py From CROWN-IBP with BSD 2-Clause "Simplified" License | 5 votes |
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret
Example #7
Source File: MiniNetv2.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 5 votes |
def call(self, inputs, training=True):
    outputs = self.model_output(inputs, training=training)

    # add activations to the outputs of the model
    for i in range(len(outputs)):
        outputs[i] = layers.LeakyReLU(alpha=0.3)(outputs[i])

    x = self.upsample1(outputs[0], training=training)
    x = self.adap_encoder_1(x, training=training) + self.adap_encoder_2(outputs[1], training=training)
    x = self.adap_encoder_2_1(x, training=training)
    x = self.adap_encoder_2_2(x, training=training)
    x = self.adap_encoder_2_22(x, training=training)
    x = self.upsample2(x, training=training)
    x = self.adap_encoder_2_3(x, training=training)
    x += self.adap_encoder_3(outputs[2], training=training)
    x = self.adap_encoder_3_1(x, training=training)
    x = self.adap_encoder_3_2(x, training=training)
    x = self.adap_encoder_3_3(x, training=training)
    x = self.adap_encoder_3_4(x, training=training)
    x = self.upsample3(x, training=training)
    x = self.adap_encoder_3_5(x, training=training)
    x += reshape_into(self.adap_encoder_4(outputs[3]), x)
    x = self.adap_encoder_4_1(x, training=training)
    x = self.adap_encoder_4_2(x, training=training)
    x = self.upsample4(x, last=True, training=training)
    x = tf.keras.activations.softmax(x, axis=-1)
    return x
Example #8
Source File: Unet_Xception_Resnetblock.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 5 votes |
def residual_block(blockInput, num_filters=16):
    x = LeakyReLU(alpha=0.1)(blockInput)
    x = BatchNormalization()(x)
    blockInput = BatchNormalization()(blockInput)
    x = convolution_block(x, num_filters, (3, 3))
    x = convolution_block(x, num_filters, (3, 3), activation=False)
    x = Add()([x, blockInput])
    return x
Example #9
Source File: Unet_Xception_Resnetblock.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 5 votes |
def convolution_block(x, filters, size, strides=(1, 1), padding='same', activation=True):
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation == True:
        x = LeakyReLU(alpha=0.1)(x)
    return x
Example #10
Source File: common.py From tf2-mobile-pose-estimation with Apache License 2.0 | 5 votes |
def __init__(self, alpha=0.25, **kwargs):
    super(PReLU2, self).__init__(**kwargs)
    self.active = nn.LeakyReLU(alpha=alpha)
Example #11
Source File: discriminator.py From CartoonGan-tensorflow with Apache License 2.0 | 5 votes |
def __init__(self, base_filters=32, lrelu_alpha=0.2, pad_type="reflect", norm_type="batch"):
    super(Discriminator, self).__init__(name="Discriminator")
    if pad_type == "reflect":
        self.flat_pad = ReflectionPadding2D()
    elif pad_type == "constant":
        self.flat_pad = ZeroPadding2D()
    else:
        raise ValueError(f"pad_type not recognized {pad_type}")

    self.flat_conv = Conv2D(base_filters, 3)
    self.flat_lru = LeakyReLU(lrelu_alpha)
    self.strided_conv1 = StridedConv(base_filters * 2, lrelu_alpha, pad_type, norm_type)
    self.strided_conv2 = StridedConv(base_filters * 4, lrelu_alpha, pad_type, norm_type)

    self.conv2 = Conv2D(base_filters * 8, 3)
    if norm_type == "instance":
        self.norm = InstanceNormalization()
    elif norm_type == "batch":
        self.norm = BatchNormalization()
    self.lrelu = LeakyReLU(lrelu_alpha)
    self.final_conv = Conv2D(1, 3)
Example #12
Source File: mnist_cifar_models.py From CROWN-IBP with BSD 2-Clause "Simplified" License | 5 votes |
def get_model_meta_real(filename, model_loader):
    model = model_loader(filename, custom_objects={"fn": lambda y_true, y_pred: y_pred, "tf": tf})
    json_string = model.to_json()
    model_meta = json.loads(json_string)
    weight_dims = []
    activations = set()
    activation_param = None
    input_dim = []
    # print(model_meta)
    try:
        # for keras
        model_layers = model_meta['config']['layers']
    except (KeyError, TypeError):
        # for tensorflow.keras
        model_layers = model_meta['config']
    for i, layer in enumerate(model_layers):
        if i == 0 and layer['class_name'] == "Flatten":
            input_dim = layer['config']['batch_input_shape']
        if layer['class_name'] == "Dense":
            units = layer['config']['units']
            weight_dims.append(units)
            activation = layer['config']['activation']
            if activation != 'linear':
                activations.add(activation)
        elif layer['class_name'] == "Activation":
            activation = layer['config']['activation']
            activations.add(activation)
        elif layer['class_name'] == "LeakyReLU":
            activation_param = layer['config']['alpha']
            activations.add("leaky")
        elif layer['class_name'] == "Lambda":
            if "arctan" in layer['config']["name"]:
                activation = "arctan"
                activations.add("arctan")
    assert len(activations) == 1, "only one activation is supported," + str(activations)
    return weight_dims, list(activations)[0], activation_param, input_dim
Example #13
Source File: gatconv.py From dgl with Apache License 2.0 | 5 votes |
def __init__(self,
             in_feats,
             out_feats,
             num_heads,
             feat_drop=0.,
             attn_drop=0.,
             negative_slope=0.2,
             residual=False,
             activation=None):
    super(GATConv, self).__init__()
    self._num_heads = num_heads
    self._in_feats = in_feats
    self._out_feats = out_feats
    xinit = tf.keras.initializers.VarianceScaling(
        scale=np.sqrt(2), mode="fan_avg", distribution="untruncated_normal")
    if isinstance(in_feats, tuple):
        self.fc_src = layers.Dense(
            out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
        self.fc_dst = layers.Dense(
            out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
    else:
        self.fc = layers.Dense(
            out_feats * num_heads, use_bias=False, kernel_initializer=xinit)
    self.attn_l = tf.Variable(initial_value=xinit(
        shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
    self.attn_r = tf.Variable(initial_value=xinit(
        shape=(1, num_heads, out_feats), dtype='float32'), trainable=True)
    self.feat_drop = layers.Dropout(rate=feat_drop)
    self.attn_drop = layers.Dropout(rate=attn_drop)
    self.leaky_relu = layers.LeakyReLU(alpha=negative_slope)
    if residual:
        if in_feats != out_feats:
            self.res_fc = layers.Dense(
                num_heads * out_feats, use_bias=False, kernel_initializer=xinit)
        else:
            self.res_fc = Identity()
    else:
        self.res_fc = None
        # self.register_buffer('res_fc', None)
    self.activation = activation
Example #14
Source File: cyclegan-7.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 5 votes |
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU

    Arguments: (partial)
    inputs (tensor): the decoder layer input
    paired_inputs (tensor): the encoder layer output
        provided by U-Net skip connection &
        concatenated to inputs.
    """
    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x
Example #15
Source File: dcgan-mnist-4.2.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 5 votes |
def build_discriminator(inputs):
    """Build a Discriminator Model

    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with BN so it is not used here
    unlike in [1] or original paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs
    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator = Model(inputs, x, name='discriminator')
    return discriminator
Example #16
Source File: darknet.py From imgclsmob with MIT License | 5 votes |
def dark_convYxY(in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 data_format="channels_last",
                 **kwargs):
    """
    DarkNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    if pointwise:
        return conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            **kwargs)
    else:
        return conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            **kwargs)
Example #17
Source File: ibppose_coco.py From imgclsmob with MIT License | 5 votes |
def __init__(self,
             passes,
             backbone_out_channels,
             outs_channels,
             depth,
             growth_rate,
             use_bn,
             in_channels=3,
             in_size=(256, 256),
             data_format="channels_last",
             **kwargs):
    super(IbpPose, self).__init__(**kwargs)
    self.in_size = in_size
    self.data_format = data_format
    activation = nn.LeakyReLU(alpha=0.01)

    self.backbone = IbpBackbone(
        in_channels=in_channels,
        out_channels=backbone_out_channels,
        activation=activation,
        data_format=data_format,
        name="backbone")

    self.decoder = SimpleSequential(name="decoder")
    for i in range(passes):
        merge = (i != passes - 1)
        self.decoder.add(IbpPass(
            channels=backbone_out_channels,
            mid_channels=outs_channels,
            depth=depth,
            growth_rate=growth_rate,
            merge=merge,
            use_bn=use_bn,
            activation=activation,
            data_format=data_format,
            name="pass{}".format(i + 1)))
Example #18
Source File: cyclegan-7.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 4 votes |
def build_discriminator(input_shape,
                        kernel_size=3,
                        patchgan=True,
                        name=None):
    """The discriminator is a 4-layer encoder that outputs either
    a 1-dim or a n x n-dim patch of probability that input is real

    Arguments:
    input_shape (tuple): input shape
    kernel_size (int): kernel size of decoder layers
    patchgan (bool): whether the output is a patch or just a 1-dim
    name (string): name assigned to discriminator model

    Returns:
    discriminator (Model):
    """
    inputs = Input(shape=input_shape)
    x = encoder_layer(inputs,
                      32,
                      kernel_size=kernel_size,
                      activation='leaky_relu',
                      instance_norm=False)
    x = encoder_layer(x,
                      64,
                      kernel_size=kernel_size,
                      activation='leaky_relu',
                      instance_norm=False)
    x = encoder_layer(x,
                      128,
                      kernel_size=kernel_size,
                      activation='leaky_relu',
                      instance_norm=False)
    x = encoder_layer(x,
                      256,
                      kernel_size=kernel_size,
                      strides=1,
                      activation='leaky_relu',
                      instance_norm=False)

    # if patchgan=True use nxn-dim output of probability
    # else use 1-dim output of probability
    if patchgan:
        x = LeakyReLU(alpha=0.2)(x)
        outputs = Conv2D(1,
                         kernel_size=kernel_size,
                         strides=2,
                         padding='same')(x)
    else:
        x = Flatten()(x)
        x = Dense(1)(x)
        outputs = Activation('linear')(x)

    discriminator = Model(inputs, outputs, name=name)
    return discriminator
Example #19
Source File: cgan-mnist-4.3.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 4 votes |
def build_discriminator(inputs, labels, image_size):
    """Build a Discriminator Model

    Inputs are concatenated after Dense layer.
    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with BN so it is not used here
    unlike in DCGAN paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)
        labels (Layer): Input layer for one-hot vector to condition the inputs
        image_size: Target size of one side (assuming square image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs
    y = Dense(image_size * image_size)(labels)
    y = Reshape((image_size, image_size, 1))(y)
    x = concatenate([x, y])

    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    # input is conditioned by labels
    discriminator = Model([inputs, labels], x, name='discriminator')
    return discriminator
Example #20
Source File: networks.py From brainstorm with MIT License | 4 votes |
def unet2D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
           ):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv2D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1), padding='same',
                name='{}_enc_conv2D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling2D(pool_size=(2, 2), padding='same',
                             name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(curr_shape < list(img_shape[:len(curr_shape)])):
            x = UpSampling2D(size=(2, 2),
                             name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # just concatenate the final layer here
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_2D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_vol_sizes[-i - 2])
            x = Concatenate(axis=-1)([x, encodings[-i - 2]])

        for j in range(n_convs_per_stage):
            x = Conv2D(nf_dec[i],
                       kernel_size=ks, padding='same',
                       name='{}_dec_conv2D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)

    y = Conv2D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv2D_final'.format(layer_prefix))(x)  # add your own activation after this model
    return y
Example #21
Source File: networks.py From brainstorm with MIT License | 4 votes |
def unet3D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
           ):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv3D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1, 1), padding='same',
                name='{}_enc_conv3D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling3D(pool_size=(2, 2, 2), padding='same',
                             name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(curr_shape < list(img_shape[:len(curr_shape)])):
            us = (2, 2, 2)
            x = UpSampling3D(size=us,
                             name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # just concatenate the final layer here
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_3D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_vol_sizes[-i - 2])
            x = Concatenate(axis=-1)([x, encodings[-i - 2]])

        for j in range(n_convs_per_stage):
            x = Conv3D(nf_dec[i],
                       kernel_size=ks, strides=(1, 1, 1), padding='same',
                       name='{}_dec_conv3D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)

    y = Conv3D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv3D_final'.format(layer_prefix))(x)  # add your own activation after this model
    return y
Example #22
Source File: darknet.py From imgclsmob with MIT License | 4 votes |
def __init__(self,
             channels,
             odd_pointwise,
             avg_pool_size,
             cls_activ,
             alpha=0.1,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    super(DarkNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            stage.add(dark_convYxY(
                in_channels=in_channels,
                out_channels=out_channels,
                alpha=alpha,
                pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise),
                data_format=data_format,
                name="unit{}".format(j + 1)))
            in_channels = out_channels
        if i != len(channels) - 1:
            stage.add(MaxPool2d(
                pool_size=2,
                strides=2,
                data_format=data_format,
                name="pool{}".format(i + 1)))
        self.features.add(stage)

    self.output1 = tf.keras.Sequential(name="output1")
    self.output1.add(Conv2d(
        in_channels=in_channels,
        out_channels=classes,
        kernel_size=1,
        data_format=data_format,
        name="final_conv"))
    if cls_activ:
        self.output1.add(nn.LeakyReLU(alpha=alpha))
    self.output1.add(nn.AveragePooling2D(
        pool_size=avg_pool_size,
        strides=1,
        data_format=data_format,
        name="final_pool"))
Example #23
Source File: darknet53.py From imgclsmob with MIT License | 4 votes |
def __init__(self,
             channels,
             init_block_channels,
             alpha=0.1,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    super(DarkNet53, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    self.features.add(conv3x3_block(
        in_channels=in_channels,
        out_channels=init_block_channels,
        activation=nn.LeakyReLU(alpha=alpha),
        data_format=data_format,
        name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            if j == 0:
                stage.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=2,
                    activation=nn.LeakyReLU(alpha=alpha),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
            else:
                stage.add(DarkUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=alpha,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
            in_channels = out_channels
        self.features.add(stage)
    self.features.add(nn.AveragePooling2D(
        pool_size=7,
        strides=1,
        data_format=data_format,
        name="final_pool"))

    self.output1 = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output1")