Python tensorflow.keras.regularizers.l2() Examples
The following are 29 code examples of tensorflow.keras.regularizers.l2(), collected from open-source projects.
You may also want to check out all available functions/classes of the module tensorflow.keras.regularizers.
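Before the project examples, a minimal sketch of the basic pattern may help: l2(factor) returns a regularizer object that can be attached to a layer's weights and adds factor * sum(square(w)) to the model's loss. The layer sizes, input shape, and the 1e-4 factor below are illustrative assumptions, not taken from any example on this page.

import tensorflow as tf
from tensorflow.keras import layers, regularizers

# Illustrative sketch: the sizes, shapes, and 1e-4 factor are assumptions.
model = tf.keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=(32,),
                 kernel_regularizer=regularizers.l2(1e-4)),
    layers.Dense(10, activation='softmax',
                 kernel_regularizer=regularizers.l2(1e-4)),
])

# Each regularized kernel w contributes 1e-4 * sum(square(w)) to the total
# training loss; the individual penalty tensors are collected in model.losses.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
print(model.losses)  # one L2 penalty tensor per regularized kernel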
Example #1
Source File: model.py From DexiNed with MIT License
def __init__(self, out_features, **kwargs):
    super(_DenseLayer, self).__init__(**kwargs)
    k_reg = None if w_decay is None else l2(w_decay)
    self.layers = []
    self.layers.append(
        tf.keras.Sequential([
            layers.ReLU(),
            layers.Conv2D(
                filters=out_features, kernel_size=(3, 3), strides=(1, 1),
                padding='same', use_bias=True,
                kernel_initializer=weight_init,
                kernel_regularizer=k_reg),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.Conv2D(
                filters=out_features, kernel_size=(3, 3), strides=(1, 1),
                padding='same', use_bias=True,
                kernel_initializer=weight_init,
                kernel_regularizer=k_reg),
            layers.BatchNormalization(),
        ]))  # the first ReLU may be unnecessary
Example #2
Source File: train.py From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False,
                        alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2 since d/dw (learning_rate * w^2) = 2 * learning_rate * w
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight))  # in tf2.0: lambda: regularizer(weight)

    return model
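A quick note on the WEIGHT_DECAY / 2 factor above: an L2 penalty of (wd/2) * w^2 has gradient wd * w, which is exactly the update that decoupled weight decay (the linked paper) applies directly, so halving the coefficient aligns the two. A minimal numeric check, with an assumed decay value:

import tensorflow as tf

w = tf.Variable(3.0)
weight_decay = 0.01  # assumed value, for illustration only

with tf.GradientTape() as tape:
    penalty = (weight_decay / 2) * tf.square(w)

# gradient of (wd/2) * w^2 is wd * w, matching decoupled weight decay
grad = tape.gradient(penalty, w)
print(float(grad), weight_decay * float(w))  # 0.03 0.03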
Example #3
Source File: model.py From DexiNed with MIT License
def __init__(self, mid_features, out_features=None, stride=(1, 1),
             use_bn=True, use_act=True, **kwargs):
    super(DoubleConvBlock, self).__init__(**kwargs)
    self.use_bn = use_bn
    self.use_act = use_act
    out_features = mid_features if out_features is None else out_features
    k_reg = None if w_decay is None else l2(w_decay)

    self.conv1 = layers.Conv2D(
        filters=mid_features, kernel_size=(3, 3), strides=stride, padding='same',
        use_bias=True, kernel_initializer=weight_init, kernel_regularizer=k_reg)
    self.bn1 = layers.BatchNormalization()
    self.conv2 = layers.Conv2D(
        filters=out_features, kernel_size=(3, 3), strides=(1, 1), padding='same',
        use_bias=True, kernel_initializer=weight_init, kernel_regularizer=k_reg)
    self.bn2 = layers.BatchNormalization()
    self.relu = layers.ReLU()
Example #4
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """Adds an initial conv block, with batch norm and relu for the inception resnext

    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor

    Returns:
        a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #5
Source File: ResNextFPN.py From TF.Keras-Commonly-used-models with Apache License 2.0
def bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    init = input
    grouped_channels = int(filters / cardinality)

    if init.shape[-1] != 2 * filters:
        init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                      use_bias=False, kernel_initializer='he_normal',
                      kernel_regularizer=l2(weight_decay))(init)
        init = BatchNormalization(axis=3)(init)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=3)(x)

    x = add([init, x])
    x = Activation('relu')(x)

    return x
Example #6
Source File: dual_path_network.py From TF.Keras-Commonly-used-models with Apache License 2.0
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    '''Adds an initial conv block, with batch norm and relu for the DPN

    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor

    Returns:
        a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #7
Source File: morpho_tagger.py From DeepPavlov with Apache License 2.0
def _initialize(self):
    if isinstance(self.char_window_size, int):
        self.char_window_size = [self.char_window_size]
    if self.char_filters is None or isinstance(self.char_filters, int):
        self.char_filters = [self.char_filters] * len(self.char_window_size)
    if len(self.char_window_size) != len(self.char_filters):
        raise ValueError("There should be the same number of window sizes and filter sizes")
    if isinstance(self.word_lstm_units, int):
        self.word_lstm_units = [self.word_lstm_units] * self.word_lstm_layers
    if len(self.word_lstm_units) != self.word_lstm_layers:
        raise ValueError("There should be the same number of lstm layer units and lstm layers")
    if self.word_vectorizers is None:
        self.word_vectorizers = []
    if self.regularizer is not None:
        self.regularizer = l2(self.regularizer)
    if self.verbose > 0:
        log.info("{} symbols, {} tags in CharacterTagger".format(len(self.symbols), len(self.tags)))
Example #8
Source File: dual_path_network.py From TF.Keras-Commonly-used-models with Apache License 2.0
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    '''Adds a Batchnorm-Relu-Conv block for DPN

    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution

    Returns:
        a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return x
Example #9
Source File: Darknet53.py From TF.Keras-Commonly-used-models with Apache License 2.0
def conv2d_unit(x, filters, kernels, strides=1):
    """Convolution Unit

    This function defines a 2D convolution operation with BN and LeakyReLU.

    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernels: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.

    # Returns
        Output tensor.
    """
    x = Conv2D(filters, kernels, padding='same', strides=strides,
               activation='linear', kernel_regularizer=l2(5e-4))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    return x
Example #10
Source File: SE_HRNet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def build(self, input_shape, num_output, repetitions=3):
    input_x = Input(shape=input_shape)
    feature_maps = self.extract_multi_resolution_feature(repetitions=repetitions)(input_x)
    x = self.make_classification_head(feature_maps, self.filter_list)

    x = Conv2D(filters=x.get_shape().as_list()[-1] * 2,
               kernel_size=(1, 1), strides=(1, 1), padding='same',
               kernel_regularizer=l2(1e-4))(x)
    x = BatchNormalization(axis=-1)(x, training=self.training)
    x = Activation("relu")(x)
    x = GlobalAveragePooling2D()(x)
    x = Flatten()(x)
    x = Dense(units=num_output, name='final_fully_connected',
              kernel_initializer="he_normal", kernel_regularizer=l2(1e-4),
              activation='softmax')(x)

    return Model(inputs=input_x, outputs=x)
Example #11
Source File: utime.py From U-Time with MIT License
def create_seq_modeling(in_, input_dims, data_per_period, n_periods,
                        n_classes, transition_window, name_prefix=""):
    cls = AveragePooling2D((data_per_period, 1),
                           name="{}average_pool".format(name_prefix))(in_)
    out = Conv2D(filters=n_classes,
                 kernel_size=(transition_window, 1),
                 activation="softmax",
                 kernel_regularizer=regularizers.l2(1e-5),
                 padding="same",
                 name="{}sequence_conv_out".format(name_prefix))(cls)
    s = [-1, n_periods, input_dims // data_per_period, n_classes]
    if s[2] == 1:
        s.pop(2)  # Squeeze the dim
    out = Lambda(lambda x: tf.reshape(x, s),
                 name="{}sequence_classification_reshaped".format(name_prefix))(out)
    return out
Example #12
Source File: utime.py From U-Time with MIT License
def log(self):
    self.logger("{} Model Summary\n"
                "-------------------".format(__class__.__name__))
    self.logger("N periods: {}".format(self.n_periods))
    self.logger("Input dims: {}".format(self.input_dims))
    self.logger("N channels: {}".format(self.n_channels))
    self.logger("N classes: {}".format(self.n_classes))
    self.logger("Kernel size: {}".format(self.kernel_size))
    self.logger("Dilation rate: {}".format(self.dilation))
    self.logger("CF factor: %.3f" % self.cf)
    self.logger("Init filters: {}".format(self.init_filters))
    self.logger("Depth: %i" % self.depth)
    self.logger("Poolings: {}".format(self.pools))
    self.logger("Transition window {}".format(self.transition_window))
    self.logger("Dense activation {}".format(self.dense_classifier_activation))
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Receptive field: %s" % self.receptive_field[0])
    self.logger("Seq length.: {}".format(self.n_periods * self.input_dims))
    self.logger("N params: %i" % self.count_params())
    self.logger("Input: %s" % self.input)
    self.logger("Output: %s" % self.output)
Example #13
Source File: network.py From ivis with GNU General Public License v2.0
def triplet_network(base_network, embedding_dims=2, embedding_l2=0.0):
    def output_shape(shapes):
        shape1, shape2, shape3 = shapes
        return (3, shape1[0],)

    input_a = Input(shape=base_network.input_shape[1:])
    input_p = Input(shape=base_network.input_shape[1:])
    input_n = Input(shape=base_network.input_shape[1:])

    embeddings = Dense(embedding_dims,
                       kernel_regularizer=l2(embedding_l2))(base_network.output)
    network = Model(base_network.input, embeddings)

    processed_a = network(input_a)
    processed_p = network(input_p)
    processed_n = network(input_n)

    triplet = Lambda(K.stack,
                     output_shape=output_shape,
                     name='stacked_triplets')([processed_a, processed_p, processed_n])
    model = Model([input_a, input_p, input_n], triplet)

    return model, processed_a, processed_p, processed_n
Example #14
Source File: MNasNet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def depthwiseConv_bn(x, depth_multiplier, kernel_size, strides=1):
    """Depthwise convolution

    The DepthwiseConv2D is just the first step of the Depthwise Separable
    convolution (without the pointwise step). Depthwise Separable convolutions
    consist of performing just the first step in a depthwise spatial
    convolution (which acts on each input channel separately).

    This function defines a 2D Depthwise separable convolution operation
    with BN and relu6.

    # Arguments
        x: Tensor, input tensor of conv layer.
        depth_multiplier: Integer, number of depthwise convolution output
            channels for each input channel.
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.

    # Returns
        Output tensor.
    """
    x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides,
                               depth_multiplier=depth_multiplier,
                               padding='same', use_bias=False,
                               kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)
    return x
Example #15
Source File: DenseNet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    """
    Creates a transition layer between dense blocks, which does convolution
    and pooling and works as downsampling.
    """
    x = BatchNormalization(gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels * compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
Example #16
Source File: multiclass_3D_CNN.py From CNNArt with Apache License 2.0
def buildModel(patchShape, numClasses):
    input = Input(shape=patchShape)
    n_base_filter = 32
    _handle_data_format()
    conv = Conv3D(filters=n_base_filter,
                  kernel_size=(7, 7, 7),
                  strides=(2, 2, 2),
                  kernel_initializer="he_normal")(input)
    norm = BatchNormalization(axis=CHANNEL_AXIS)(conv)
    conv1 = Activation("relu")(norm)
    pool1 = MaxPooling3D(pool_size=(3, 3, 3),
                         strides=(2, 2, 2),
                         padding="same")(conv1)
    flatten1 = Flatten()(pool1)
    dense = Dense(units=numClasses,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(1e-4))(flatten1)
    model = Model(inputs=input, outputs=dense)
    return model
Example #17
Source File: multitask_unet2d.py From MultiPlanarUNet with MIT License
def log(self):
    self.logger("Multi-Task UNet Model Summary\n"
                "-----------------------------")
    self.logger("N classes: %s" % list(self.n_classes))
    self.logger("CF factor: %.3f" % self.cf**2)
    self.logger("Depth: %i" % self.depth)
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Out activation: %s" % list(self.out_activation))
    self.logger("Receptive field: %s" % self.receptive_field)
    self.logger("N params: %i" % self.count_params())
    self.logger("N tasks: %i" % self.n_tasks)

    if self.n_tasks > 1:
        inputs = self.input
        outputs = self.output
    else:
        inputs = [self.input]
        outputs = [self.output]

    for i, (id_, in_, out) in enumerate(zip(self.task_IDs, inputs, outputs)):
        self.logger("\n--- Task %s ---" % id_)
        self.logger("In shape: %s" % in_.shape)
        self.logger("Out shape: %s\n" % out.shape)
Example #18
Source File: unet.py From MultiPlanarUNet with MIT License
def log(self):
    self.logger("UNet Model Summary\n------------------")
    self.logger("Image rows: %i" % self.img_shape[0])
    self.logger("Image cols: %i" % self.img_shape[1])
    self.logger("Image channels: %i" % self.img_shape[2])
    self.logger("N classes: %i" % self.n_classes)
    self.logger("CF factor: %.3f" % self.cf**2)
    self.logger("Depth: %i" % self.depth)
    self.logger("l2 reg: %s" % self.l2_reg)
    self.logger("Padding: %s" % self.padding)
    self.logger("Conv activation: %s" % self.activation)
    self.logger("Out activation: %s" % self.out_activation)
    self.logger("Receptive field: %s" % self.receptive_field)
    self.logger("N params: %i" % self.count_params())
    self.logger("Output: %s" % self.output)
    self.logger("Crop: %s" % (self.label_crop if np.sum(self.label_crop) != 0 else "None"))
Example #19
Source File: cnnmodule.py From RSN with MIT License
def _cnn_(cnn_input_shape, name=None):
    with tf.variable_scope(name or 'convnet', reuse=tf.AUTO_REUSE):
        convnet = Sequential()
        convnet.add(Conv1D(230, 3,
                           input_shape=cnn_input_shape,
                           kernel_initializer=W_init,
                           bias_initializer=b_init_conv,
                           kernel_regularizer=l2(2e-4)))
        convnet.add(MaxPooling1D(pool_size=cnn_input_shape[0] - 4))
        convnet.add(Activation('relu'))
        convnet.add(Flatten())
        convnet.add(Dense(cnn_input_shape[-1] * 230,
                          activation='sigmoid',
                          kernel_initializer=W_init,
                          bias_initializer=b_init_dense,
                          kernel_regularizer=l2(1e-3)))
    return convnet
Example #20
Source File: dual_path_network.py From TF.Keras-Commonly-used-models with Apache License 2.0
def _grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    '''Adds a grouped convolution block. It is an equivalent block from the paper

    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term

    Returns:
        a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=strides, kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        # slice out the channels belonging to group c
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=strides, kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    group_merge = BatchNormalization(axis=channel_axis)(group_merge)
    group_merge = Activation('relu')(group_merge)

    return group_merge
Example #21
Source File: MNasNet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def conv_bn(x, filters, kernel_size, strides=1, alpha=1, activation=True):
    """Convolution Block

    This function defines a 2D convolution operation with BN and relu6.

    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        alpha: An integer which multiplies the filters dimensionality
        activation: A boolean which indicates whether to have an activation
            after the normalization

    # Returns
        Output tensor.
    """
    filters = _make_divisible(filters * alpha)
    x = layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                      padding='same', use_bias=False,
                      kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    if activation:
        x = layers.ReLU(max_value=6)(x)
    return x

# Depth-wise Separable Convolution with batch normalization
Example #22
Source File: MiniNetv2.py From TF.Keras-Commonly-used-models with Apache License 2.0
def convolution(filters, kernel_size, strides=1, dilation_rate=1, use_bias=True):
    return layers.Conv2D(filters, kernel_size, strides=strides, padding='same',
                         use_bias=use_bias,
                         kernel_regularizer=regularizers.l2(l=0.0003),
                         dilation_rate=dilation_rate)

# Transpose convolution
Example #23
Source File: SE_HRNet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def first_layer(self, x, scope):
    with tf.name_scope(scope):
        x = Conv2D(filters=self.init_filters, kernel_size=(3, 3), strides=(2, 2),
                   padding='same', kernel_regularizer=l2(1e-4))(x)
        x = BatchNormalization(axis=-1)(x, training=self.training)
        x = Conv2D(filters=self.init_filters, kernel_size=(3, 3), strides=(2, 2),
                   padding='same', kernel_regularizer=l2(1e-4))(x)
        norm = BatchNormalization(axis=-1)(x, training=self.training)
        act = Activation("relu")(norm)
        return act
Example #24
Source File: layers.py From keras-YOLOv3-model-set with MIT License
def DarknetDepthwiseConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for DepthwiseConv2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return DepthwiseConv2D(*args, **darknet_conv_kwargs)
Example #25
Source File: layers.py From keras-YOLOv3-model-set with MIT License
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet weight regularizer for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs.update(kwargs)
    return _DarknetConv2D(*args, **darknet_conv_kwargs)
Example #26
Source File: MiniNetv2.py From TF.Keras-Commonly-used-models with Apache License 2.0
def separableConv(filters, kernel_size, strides=1, dilation_rate=1, use_bias=True):
    return layers.SeparableConv2D(filters, kernel_size, strides=strides, padding='same',
                                  use_bias=use_bias,
                                  depthwise_regularizer=regularizers.l2(l=0.0003),
                                  pointwise_regularizer=regularizers.l2(l=0.0003),
                                  dilation_rate=dilation_rate)
Example #27
Source File: layers.py From keras-YOLOv3-model-set with MIT License
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
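Because these wrappers simply forward *args and **kwargs, they are called exactly like the layer they wrap. A brief usage sketch (the input shape and filter counts are assumptions for illustration):

from tensorflow.keras.layers import Input

inputs = Input(shape=(416, 416, 3))
x = DarknetConv2D(32, (3, 3))(inputs)             # 'same' padding, kernel regularized with l2(5e-4)
x = DarknetConv2D(64, (3, 3), strides=(2, 2))(x)  # strides of (2, 2) switches padding to 'valid'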
Example #28
Source File: models.py From tf2-yolo3 with Apache License 2.0
def DarknetConv(x, filters, size, strides=1, padding='same', batch_norm=True):
    x = Conv2D(filters=filters, kernel_size=size, strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
Example #29
Source File: models.py From DirectML with MIT License
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    if strides == 1:
        padding = 'same'
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size, strides=strides,
               padding=padding, use_bias=not batch_norm,
               kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x