Python keras.initializers.TruncatedNormal() Examples
The following are code examples of keras.initializers.TruncatedNormal(), taken from open source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes available in the keras.initializers module.
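Before the project examples, here is a minimal usage sketch (not taken from any of the projects below) showing the typical pattern: a TruncatedNormal instance passed as a layer's kernel_initializer. The layer sizes and input shape are arbitrary placeholders.

from keras.initializers import TruncatedNormal
from keras.layers import Dense
from keras.models import Sequential

# Weights are drawn from a normal distribution with the given mean and stddev;
# samples more than two standard deviations from the mean are discarded and redrawn.
init = TruncatedNormal(mean=0.0, stddev=0.05, seed=42)

model = Sequential([
    Dense(64, activation='relu', kernel_initializer=init, input_shape=(10,)),
    Dense(1, kernel_initializer=init),
])

The examples below show the same pattern inside larger convolutional models and in a Keras test suite.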
Example #1
Source File: seriesnet.py (from seriesnet, MIT License)

def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    def f(input_):
        residual = input_

        layer_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
                           dilation_rate=dilation, activation='linear',
                           padding='causal', use_bias=False,
                           kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                           kernel_regularizer=l2(l2_layer_reg))(input_)

        layer_out = Activation('selu')(layer_out)

        skip_out = Conv1D(1, 1, activation='linear', use_bias=False,
                          kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                          kernel_regularizer=l2(l2_layer_reg))(layer_out)

        network_in = Conv1D(1, 1, activation='linear', use_bias=False,
                            kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                            kernel_regularizer=l2(l2_layer_reg))(layer_out)

        network_out = Add()([residual, network_in])

        return network_out, skip_out
    return f
Example #2
Source File: seriesnet.py (from seriesnet, MIT License)

def DC_CNN_Model(length):
    input = Input(shape=(length, 1))

    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data

    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    l9 = Activation('relu')(l8)

    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
                 kernel_regularizer=l2(0.001))(l9)

    model = Model(input=input, output=l21)

    adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None,
                           decay=0.0, amsgrad=False)
    model.compile(loss='mae', optimizer=adam, metrics=['mse'])

    return model
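The model above can be trained like any other Keras model. The following sketch is not part of seriesnet; it assumes DC_CNN_Block and DC_CNN_Model from Examples #1 and #2 are already defined, and fits the network on random dummy data only to illustrate the expected (length, 1) input shape.

import numpy as np

length = 128
model = DC_CNN_Model(length)

# 16 dummy univariate series of `length` steps each; causal padding keeps the
# output the same length as the input, so the targets share that shape.
X = np.random.rand(16, length, 1)
y = np.random.rand(16, length, 1)

model.fit(X, y, epochs=1, batch_size=4)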
Example #3
Source File: squeezeDet.py (from squeezedet-keras, MIT License)

def _fire_layer(self, name, input, s1x1, e1x1, e3x3, stdd=0.01):
    """
    wrapper for fire layer constructions

    :param name: name for layer
    :param input: previous layer
    :param s1x1: number of filters for squeezing
    :param e1x1: number of filters for expand 1x1
    :param e3x3: number of filters for expand 3x3
    :param stdd: standard deviation used for initialization
    :return: a keras fire layer
    """

    sq1x1 = Conv2D(
        name=name + '/squeeze1x1', filters=s1x1, kernel_size=(1, 1), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(input)

    ex1x1 = Conv2D(
        name=name + '/expand1x1', filters=e1x1, kernel_size=(1, 1), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    ex3x3 = Conv2D(
        name=name + '/expand3x3', filters=e3x3, kernel_size=(3, 3), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    return concatenate([ex1x1, ex3x3], axis=3)

# wrapper for padding, written in TensorFlow. If you want to switch to the Theano backend you need to rewrite this!
Example #4
Source File: initializers_test.py (from DeepLearning_Wavelet-LSTM, MIT License)

def test_truncated_normal(tensor_shape):
    _runner(initializers.TruncatedNormal(mean=0, stddev=1), tensor_shape,
            target_mean=0., target_std=None, target_max=2)
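The target_max=2 argument reflects the defining property of a truncated normal initializer: samples further than two standard deviations from the mean are discarded and redrawn. A small standalone check of that property (not part of the test suite, and assuming a TensorFlow backend) might look like this:

import numpy as np
from keras import backend as K
from keras import initializers

init = initializers.TruncatedNormal(mean=0.0, stddev=1.0, seed=42)

# Draw 1000 values with the initializer and evaluate them to a NumPy array.
values = K.eval(init((1000,)))

# With mean 0 and stddev 1, every sample should lie within [-2, 2].
assert np.all(np.abs(values) <= 2.0 + 1e-5)
print(values.min(), values.max())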