Python keras.engine.topology.InputSpec() Examples
The following are 30 code examples of keras.engine.topology.InputSpec(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module keras.engine.topology.
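Before the examples, here is a minimal sketch of the pattern most of them share: the constructor declares a coarse rank constraint via InputSpec, and build() refines it to pin specific axes once the input shape is known, so that the base Layer's input-compatibility check can reject mismatched inputs at call time. This sketch is not from any of the projects below; the MyScale layer and its single weight are made up for illustration, and it assumes Keras 2.x, where InputSpec still lives in keras.engine.topology (it moved to keras.layers in later releases).

# Minimal sketch, assuming Keras 2.x; `MyScale` is hypothetical.
from keras import backend as K
from keras.engine.topology import InputSpec, Layer

class MyScale(Layer):
    def __init__(self, **kwargs):
        super(MyScale, self).__init__(**kwargs)
        # Coarse constraint: accept any 2-D input until the shape is known.
        self.input_spec = InputSpec(ndim=2)

    def build(self, input_shape):
        input_dim = input_shape[1]
        # Refine the spec: pin dtype and the last axis, so incompatible
        # inputs are rejected by the base Layer's compatibility check.
        self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
        self.w = self.add_weight(shape=(input_dim,), initializer='ones', name='w')
        self.built = True

    def call(self, inputs):
        # Elementwise rescaling by the learned per-feature weight.
        return inputs * self.w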
Example #1
Source File: fm_keras.py From KDDCup2019_admin with MIT License

def __init__(self, feature_num, feature_size, embedding_size, output_dim=1, activation=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(FMLayer, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.embedding_size = embedding_size
    self.activation = activations.get(activation)
    self.input_spec = InputSpec(ndim=2)
    self.feature_num = feature_num
    self.feature_size = feature_size
Example #2
Source File: bilinear_upsampling.py From MobileNetV3 with MIT License

def __init__(self, size=(1, 1), target_size=None, data_format='default', **kwargs):
    """Init.

    size: factor to original shape (i.e. original -> size * original).
    target_size: target size (i.e. original -> target).
    """
    if data_format == 'default':
        data_format = K.image_data_format()
    self.size = tuple(size)
    if target_size is not None:
        self.target_size = tuple(target_size)
    else:
        self.target_size = None
    assert data_format in {'channels_last', 'channels_first'}, \
        'data_format must be in {channels_last, channels_first}'
    self.data_format = data_format
    self.input_spec = [InputSpec(ndim=4)]
    super(BilinearUpSampling2D, self).__init__(**kwargs)
Example #3
Source File: keras_layer_AnchorBoxes.py From ssd_keras with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(AnchorBoxes, self).build(input_shape)
Example #4
Source File: depthwise_conv2d.py From keras-mobilenet with MIT License

def build(self, input_shape):
    if len(input_shape) < 4:
        raise ValueError('Inputs to `SeparableConv2D` should have rank 4. '
                         'Received input shape:', str(input_shape))
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs to '
                         '`SeparableConv2D` '
                         'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    depthwise_kernel_shape = (self.kernel_size[0],
                              self.kernel_size[1],
                              input_dim,
                              self.depth_multiplier)

    self.depthwise_kernel = self.add_weight(
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        name='depthwise_kernel',
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint)

    if self.use_bias:
        self.bias = self.add_weight(shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True
Example #5
Source File: keras_layer_DecodeDetections.py From ssd_keras with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetections, self).build(input_shape)
Example #6
Source File: keras_layer_DecodeDetectionsFast.py From ssd_keras with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetectionsFast, self).build(input_shape)
Example #7
Source File: keras_layer_L2Normalization.py From ssd_keras with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    gamma = self.gamma_init * np.ones((input_shape[self.axis],))
    self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
    self.trainable_weights = [self.gamma]
    super(L2Normalization, self).build(input_shape)
Example #8
Source File: textClassifierRNN.py From textClassifier with Apache License 2.0

def build(self, input_shape):
    assert len(input_shape) == 3
    # self.W = self.init((input_shape[-1], 1))
    self.W = self.init((input_shape[-1],))
    # self.input_spec = [InputSpec(shape=input_shape)]
    self.trainable_weights = [self.W]
    super(AttLayer, self).build(input_shape)  # be sure you call this somewhere!
Example #9
Source File: textClassifierRNN.py From textClassifier with Apache License 2.0

def __init__(self, **kwargs):
    self.init = initializations.get('normal')
    # self.input_spec = [InputSpec(ndim=3)]
    super(AttLayer, self).__init__(**kwargs)
Example #10
Source File: keras_dec.py From DEC-Keras with GNU General Public License v3.0

def build(self, input_shape):
    assert len(input_shape) == 2
    input_dim = input_shape[1]
    self.input_spec = [InputSpec(dtype=K.floatx(), shape=(None, input_dim))]
    self.W = K.variable(self.initial_weights)
    self.trainable_weights = [self.W]
Example #11
Source File: keras_dec.py From DEC-Keras with GNU General Public License v3.0

def __init__(self, output_dim, input_dim=None, weights=None, alpha=1.0, **kwargs):
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.alpha = alpha
    # kmeans cluster centre locations
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(ClusteringLayer, self).__init__(**kwargs)
Example #12
Source File: DEC.py From DEC-keras with MIT License

def build(self, input_shape):
    assert len(input_shape) == 2
    input_dim = input_shape[1]
    self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
    self.clusters = self.add_weight(shape=(self.n_clusters, input_dim),
                                    initializer='glorot_uniform',
                                    name='clusters')
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
Example #13
Source File: DEC.py From DEC-keras with MIT License

def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(ClusteringLayer, self).__init__(**kwargs)
    self.n_clusters = n_clusters
    self.alpha = alpha
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
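Several of the constructors in this listing (Examples #1, #13, and #18) share the same small idiom: a legacy input_dim keyword is converted into the input_shape tuple that the base Layer constructor understands. In isolation, with a made-up kwargs dict, the transformation looks like this:

# Minimal sketch of the input_dim -> input_shape conversion idiom;
# the kwargs dict here is made up for illustration.
kwargs = {'input_dim': 10}
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
    # Remove input_dim and re-express it as a 1-tuple shape.
    kwargs['input_shape'] = (kwargs.pop('input_dim'),)
print(kwargs)  # {'input_shape': (10,)}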
Example #14
Source File: keras_layer_AnchorBoxes.py From keras-FP16-test with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(AnchorBoxes, self).build(input_shape)
Example #15
Source File: keras_layer_DecodeDetections.py From keras-FP16-test with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetections, self).build(input_shape)
Example #16
Source File: keras_layer_L2Normalization.py From keras-FP16-test with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    gamma = self.gamma_init * np.ones((input_shape[self.axis],))
    self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
    self.trainable_weights = [self.gamma]
    super(L2Normalization, self).build(input_shape)
Example #17
Source File: fm_keras.py From KDDCup2019_admin with MIT License

def build(self, input_shape):
    assert len(input_shape) == 2
    input_dim = input_shape[1]
    numeric_size = input_dim - self.feature_num
    self.numeric_size = numeric_size
    all_size = numeric_size + self.feature_size
    self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))

    self.w_one_hot = self.add_weight(name='one_one_hot',
                                     shape=(self.feature_size, self.output_dim),
                                     initializer='glorot_uniform',
                                     trainable=True)
    self.w_numeric = self.add_weight(name='one_numeric',
                                     shape=(numeric_size, self.output_dim),
                                     initializer='glorot_uniform',
                                     trainable=True)
    self.v_one_hot = self.add_weight(name='two_one_hot',
                                     shape=(self.feature_size, self.embedding_size),
                                     initializer='glorot_uniform',
                                     trainable=True)
    self.v_numeric = self.add_weight(name='two_numeric',
                                     shape=(numeric_size, self.embedding_size),
                                     initializer='glorot_uniform',
                                     trainable=True)
    self.b = self.add_weight(name='bias',
                             shape=(self.output_dim,),
                             initializer='zeros',
                             trainable=True)

    super(FMLayer, self).build(input_shape)
Example #18
Source File: autopool.py From autopool with MIT License

def __init__(self, axis=0,
             kernel_initializer='zeros',
             kernel_constraint=None,
             kernel_regularizer=None,
             **kwargs):
    '''
    Parameters
    ----------
    axis : int
        Axis along which to perform the pooling. By default 0 (should be time).

    kernel_initializer : Initializer for the weights matrix

    kernel_regularizer : Regularizer function applied to the weights matrix

    kernel_constraint : Constraint function applied to the weights matrix

    kwargs
    '''
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(AutoPool1D, self).__init__(**kwargs)

    self.axis = axis
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.input_spec = InputSpec(min_ndim=3)
    self.supports_masking = True
Example #19
Source File: autopool.py From autopool with MIT License

def build(self, input_shape):
    assert len(input_shape) >= 3
    input_dim = input_shape[-1]
    self.kernel = self.add_weight(shape=(1, input_dim),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
Example #20
Source File: keras_layer_L2Normalization.py From perceptron-benchmark with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    gamma = self.gamma_init * np.ones((input_shape[self.axis],))
    self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
    self.trainable_weights = [self.gamma]
    super(L2Normalization, self).build(input_shape)
Example #21
Source File: keras_layer_DecodeDetectionsFast.py From perceptron-benchmark with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetectionsFast, self).build(input_shape)
Example #22
Source File: keras_layer_DecodeDetections.py From perceptron-benchmark with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetections, self).build(input_shape)
Example #23
Source File: keras_layer_AnchorBoxes.py From perceptron-benchmark with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(AnchorBoxes, self).build(input_shape)
Example #24
Source File: anchor_boxes.py From DL.EyeSight with GNU General Public License v3.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(AnchorBoxes, self).build(input_shape)
Example #25
Source File: normalization.py From DL.EyeSight with GNU General Public License v3.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    gamma = self.gamma_init * np.ones((input_shape[self.axis],))
    self.gamma = K.variable(gamma, name="{}_gamma".format(self.name))
    self.trainable_weights = [self.gamma]
    super(L2Normalization, self).build(input_shape)
Example #26
Source File: ssd_layers.py From aiexamples with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    shape = (input_shape[self.axis],)
    init_gamma = self.scale * np.ones(shape)
    self.gamma = K.variable(init_gamma, name=self.name + '_gamma')
    self.trainable_weights = [self.gamma]
Example #27
Source File: ntm.py From ntm_keras with BSD 3-Clause "New" or "Revised" License

def build(self, input_shape):
    bs, input_length, input_dim = input_shape

    self.controller_input_dim, self.controller_output_dim = controller_input_output_shape(
        input_dim, self.units, self.m_depth, self.n_slots, self.shift_range,
        self.read_heads, self.write_heads)

    # Now that we've calculated the shape of the controller, we can add it to the layer/model.
    if self.controller is None:
        self.controller = Dense(
            name="controller",
            activation='linear',
            bias_initializer='zeros',
            units=self.controller_output_dim,
            input_shape=(bs, input_length, self.controller_input_dim))
        self.controller.build(input_shape=(self.batch_size, input_length,
                                           self.controller_input_dim))
        self.controller_with_state = False

    # This is a fixed shift matrix.
    self.C = _circulant(self.n_slots, self.shift_range)

    self.trainable_weights = self.controller.trainable_weights

    # We need to declare the number of states we want to carry around.
    # In our case the dimension seems to be 6 (LSTM) or 5 (GRU) or 4 (FF);
    # see self.get_initial_states. Those correspond to:
    # [old_ntm_output] + [init_M, init_wr, init_ww] + [init_h] (LSTM and GRU) + ([init_c] (LSTM only)).
    # old_ntm_output does not make sense in our world, but is required by the
    # definition of the step function we intend to use.
    # WARNING: What self.state_spec does is only poorly understood;
    # I only copied it from keras/recurrent.py.
    self.states = [None, None, None, None]
    self.state_spec = [InputSpec(shape=(None, self.output_dim)),                 # old_ntm_output
                       InputSpec(shape=(None, self.n_slots, self.m_depth)),      # Memory
                       InputSpec(shape=(None, self.read_heads, self.n_slots)),   # weights_read
                       InputSpec(shape=(None, self.write_heads, self.n_slots))]  # weights_write

    super(NeuralTuringMachine, self).build(input_shape)
Example #28
Source File: keras_layer_DecodeDetectionsFast.py From keras-FP16-test with Apache License 2.0

def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    super(DecodeDetectionsFast, self).build(input_shape)
Example #29
Source File: ConvolutionalMoE.py From mixture-of-experts with GNU General Public License v3.0

def build(self, input_shape):
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')

    input_dim = input_shape[channel_axis]
    expert_init_std = self.expert_kernel_initializer_scale / np.sqrt(input_dim * np.prod(self.kernel_size))
    gating_init_std = self.gating_kernel_initializer_scale / np.sqrt(np.prod(input_shape[1:]))

    expert_kernel_shape = self.kernel_size + (input_dim, self.n_total_filters)
    self.expert_kernel = self.add_weight(shape=expert_kernel_shape,
                                         initializer=RandomNormal(mean=0., stddev=expert_init_std),
                                         name='expert_kernel',
                                         regularizer=self.expert_kernel_regularizer,
                                         constraint=self.expert_kernel_constraint)

    gating_kernel_shape = input_shape[1:] + (self.n_filters, self.n_experts_per_filter)
    self.gating_kernel = self.add_weight(shape=gating_kernel_shape,
                                         initializer=RandomNormal(mean=0., stddev=gating_init_std),
                                         name='gating_kernel',
                                         regularizer=self.gating_kernel_regularizer,
                                         constraint=self.gating_kernel_constraint)

    if self.use_expert_bias:
        expert_bias_shape = ()
        for i in range(self.rank):
            expert_bias_shape = expert_bias_shape + (1,)
        expert_bias_shape = expert_bias_shape + (self.n_filters, self.n_experts_per_filter)
        self.expert_bias = self.add_weight(shape=expert_bias_shape,
                                           initializer=self.expert_bias_initializer,
                                           name='expert_bias',
                                           regularizer=self.expert_bias_regularizer,
                                           constraint=self.expert_bias_constraint)
    else:
        self.expert_bias = None

    if self.use_gating_bias:
        self.gating_bias = self.add_weight(shape=(self.n_filters, self.n_experts_per_filter),
                                           initializer=self.gating_bias_initializer,
                                           name='gating_bias',
                                           regularizer=self.gating_bias_regularizer,
                                           constraint=self.gating_bias_constraint)
    else:
        self.gating_bias = None

    self.o_shape = self.compute_output_shape(input_shape=input_shape)
    self.new_gating_outputs_shape = (-1,)
    for i in range(self.rank):
        self.new_gating_outputs_shape = self.new_gating_outputs_shape + (1,)
    self.new_gating_outputs_shape = self.new_gating_outputs_shape + (self.n_filters, self.n_experts_per_filter)

    # Set input spec.
    self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})
    self.built = True
Example #30
Source File: ConvolutionalMoE.py From mixture-of-experts with GNU General Public License v3.0

def __init__(self, n_filters,
             n_experts_per_filter,
             kernel_size,
             strides=1,
             padding='valid',
             data_format='channels_last',
             dilation_rate=1,
             expert_activation=None,
             gating_activation=None,
             use_expert_bias=True,
             use_gating_bias=True,
             expert_kernel_initializer_scale=1.0,
             gating_kernel_initializer_scale=1.0,
             expert_bias_initializer='zeros',
             gating_bias_initializer='zeros',
             expert_kernel_regularizer=None,
             gating_kernel_regularizer=None,
             expert_bias_regularizer=None,
             gating_bias_regularizer=None,
             expert_kernel_constraint=None,
             gating_kernel_constraint=None,
             expert_bias_constraint=None,
             gating_bias_constraint=None,
             activity_regularizer=None,
             **kwargs):
    if padding == 'causal':
        if data_format != 'channels_last':
            raise ValueError('When using causal padding in `Conv1DMoE`, '
                             '`data_format` must be "channels_last" (temporal data).')
    super(Conv1DMoE, self).__init__(
        rank=1,
        n_filters=n_filters,
        n_experts_per_filter=n_experts_per_filter,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        expert_activation=expert_activation,
        gating_activation=gating_activation,
        use_expert_bias=use_expert_bias,
        use_gating_bias=use_gating_bias,
        expert_kernel_initializer_scale=expert_kernel_initializer_scale,
        gating_kernel_initializer_scale=gating_kernel_initializer_scale,
        expert_bias_initializer=expert_bias_initializer,
        gating_bias_initializer=gating_bias_initializer,
        expert_kernel_regularizer=expert_kernel_regularizer,
        gating_kernel_regularizer=gating_kernel_regularizer,
        expert_bias_regularizer=expert_bias_regularizer,
        gating_bias_regularizer=gating_bias_regularizer,
        expert_kernel_constraint=expert_kernel_constraint,
        gating_kernel_constraint=gating_kernel_constraint,
        expert_bias_constraint=expert_bias_constraint,
        gating_bias_constraint=gating_bias_constraint,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.input_spec = InputSpec(ndim=3)