Python keras.backend.zeros() Examples
The following are 30 code examples of keras.backend.zeros(), drawn from open-source projects; the source file, project, and license are noted above each example.
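Before the project examples, a minimal sketch of the call itself (shapes and variable names invented here, not taken from any project below): K.zeros instantiates an all-zero Keras variable rather than a plain array, so it can be read, updated, and saved like any other weight.

from keras import backend as K
import numpy as np

v = K.zeros((2, 3))                 # all-zero variable with a static shape
print(K.int_shape(v))               # (2, 3)
print(K.get_value(v))               # NumPy array of zeros
K.set_value(v, np.ones((2, 3)))     # mutable, unlike the output of np.zeros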
Example #1
Source File: layers.py From voxelmorph with GNU General Public License v3.0
def build(self, input_shape):
    # Create mean and count.
    # These are weights because plain variables don't get saved with the
    # model, and we'd like to have these numbers saved when we save the model.
    # But we need to make sure that the weights are untrainable.
    self.mean = self.add_weight(name='mean',
                                shape=input_shape[1:],
                                initializer='zeros',
                                trainable=False)
    self.count = self.add_weight(name='count',
                                 shape=[1],
                                 initializer='zeros',
                                 trainable=False)

    # self.mean = K.zeros(input_shape[1:], name='mean')
    # self.count = K.variable(0.0, name='count')
    super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere!
Example #2
Source File: optimizers.py From keras-lookahead with MIT License
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update_sub(p, p_t))
    return self.updates
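The pair of K.zeros list comprehensions above is the standard optimizer-state idiom: one zero-initialized slot per parameter, matched in shape and dtype so K.update can write back into it. A standalone sketch of that mirroring, with an invented stand-in weight:

from keras import backend as K
import numpy as np

p = K.variable(np.random.randn(4, 8))           # stand-in for a model parameter
m = K.zeros(K.int_shape(p), dtype=K.dtype(p))   # zero slot with matching shape/dtype
print(K.int_shape(m), K.dtype(m))               # (4, 8) float32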
Example #3
Source File: QnA.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def vectorizeData(xContext, xQuestion, xAnswerBeing, xAnswerEnd, word_index,
                  context_maxlen, question_maxlen):
    '''Vectorize the words to their respective index, pad the context to the max
    context length and the question to the max question length. Answer vectors
    are padded to the max context length as well.
    '''
    X = []
    Xq = []
    YBegin = []
    YEnd = []
    for i in xrange(len(xContext)):
        x = [word_index[w] for w in xContext[i]]
        xq = [word_index[w] for w in xQuestion[i]]
        # map the first and last words of the answer span to one-hot representations
        y_Begin = np.zeros(len(xContext[i]))
        y_Begin[xAnswerBeing[i]] = 1
        y_End = np.zeros(len(xContext[i]))
        y_End[xAnswerEnd[i]] = 1
        X.append(x)
        Xq.append(xq)
        YBegin.append(y_Begin)
        YEnd.append(y_End)
    return (pad_sequences(X, maxlen=context_maxlen, padding='post'),
            pad_sequences(Xq, maxlen=question_maxlen, padding='post'),
            pad_sequences(YBegin, maxlen=context_maxlen, padding='post'),
            pad_sequences(YEnd, maxlen=context_maxlen, padding='post'))

# Note: the GloVe pre-trained model files need to be downloaded and unzipped
# into the same folder as this script.
Example #4
Source File: model_for_market1501.py From Implementation-CVPR2015-CNN-for-ReID with MIT License
def cmc(model):
    def cmc_curve(model, camera1, camera2, rank_max=50):
        num = camera1.shape[0]
        rank = []
        score = []
        camera_batch1 = np.zeros(camera1.shape)
        for i in range(num):
            for j in range(num):
                camera_batch1[j] = camera1[i]
            similarity_batch = model.predict_on_batch([camera_batch1, camera2])
            sim_trans = similarity_batch.transpose()
            similarity_rate_sorted = np.argsort(sim_trans[0])
            for k in range(num):
                if similarity_rate_sorted[k] == i:
                    rank.append(k + 1)
                    break
        rank_val = 0
        for i in range(rank_max):
            rank_val = rank_val + len([j for j in rank if i == j - 1])
            score.append(rank_val / float(num))
        return np.array(score)

    a, b = get_data_for_cmc()
    return cmc_curve(model, a, b)
Example #5
Source File: recurrent.py From keras_bn_library with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, it needs to know '
                         'its batch size. Specify the batch size '
                         'of your input tensors: \n'
                         '- If using a Sequential model, '
                         'specify the batch size by passing '
                         'a `batch_input_shape` '
                         'argument to your first layer.\n'
                         '- If using the functional API, specify '
                         'the time dimension by passing a '
                         '`batch_shape` argument to your Input layer.')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.output_dim))]
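The error branch above is the point of this method: a stateful RNN can only allocate (and later zero out) its state tensors if the batch size is fixed in advance. A hedged usage sketch of that requirement, with layer sizes invented:

from keras.models import Sequential
from keras.layers import LSTM

model = Sequential()
model.add(LSTM(32, stateful=True, batch_input_shape=(16, 10, 8)))
model.reset_states()   # reallocates or zeroes the (16, 32) state tensors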
Example #6
Source File: recurrent.py From keras_bn_library with MIT License
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    self.input_dim = input_shape[2]

    self.W = self.init((self.output_dim, 4 * self.input_dim),
                       name='{}_W'.format(self.name))
    self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
                             name='{}_U'.format(self.name))
    self.b = K.variable(np.hstack((np.zeros(self.input_dim),
                                   K.get_value(self.forget_bias_init((self.input_dim,))),
                                   np.zeros(self.input_dim),
                                   np.zeros(self.input_dim))),
                        name='{}_b'.format(self.name))

    self.A = self.init((self.input_dim, self.output_dim),
                       name='{}_A'.format(self.name))
    self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))

    self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
Example #7
Source File: rnnrbm.py From keras_bn_library with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.hidden_recurrent_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[2],
                    np.zeros((input_shape[0], self.hidden_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
                       K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.hidden_dim))]
Example #8
Source File: LRN.py From pyslam with GNU General Public License v3.0
def call(self, x, mask=None):
    s = K.shape(x)
    b = s[0]
    r = s[1]
    c = s[2]
    ch = s[3]
    half_n = self.n // 2  # half the local region
    input_sqr = K.square(x)  # square the input
    extra_channels = K.zeros((b, r, c, ch + 2 * half_n))
    input_sqr = K.concatenate([extra_channels[:, :, :, :half_n],
                               input_sqr,
                               extra_channels[:, :, :, half_n + ch:]],
                              axis=3)
    scale = self.k  # offset for the scale
    norm_alpha = self.alpha / self.n  # normalized alpha
    for i in range(self.n):
        scale += norm_alpha * input_sqr[:, :, :, i:i + ch]
    scale = scale ** self.beta
    x = x / scale
    return x
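Here K.zeros only supplies scratch zero channels for padding before the local sum. A standalone sketch of the same concatenate-with-zeros idiom, with shapes invented (assumes a TensorFlow-style backend that supports tensor slicing):

import numpy as np
from keras import backend as K

x = K.constant(np.arange(24.).reshape(1, 2, 3, 4))   # (b, r, c, ch)
half_n = 1
pad = K.zeros((1, 2, 3, 4 + 2 * half_n))
padded = K.concatenate([pad[:, :, :, :half_n], x, pad[:, :, :, half_n + 4:]],
                       axis=3)
print(K.int_shape(padded))   # (1, 2, 3, 6): one zero channel on each side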
Example #9
Source File: capsule.py From CapsNet with MIT License
def call(self, inputs, **kwargs):
    # (batch_size, 1, input_num_capsule, input_dim_capsule)
    expand_inputs = K.expand_dims(inputs, axis=1)
    # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
    expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
    # (batch_size, num_capsule, input_num_capsule, dim_capsule)
    u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

    if self.num_routing <= 0:
        self.num_routing = 3
    # (batch_size, num_capsule, input_num_capsule)
    b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
    for i in xrange(self.num_routing):
        # (batch_size, num_capsule, input_num_capsule)
        c = softmax(b, axis=1)
        # (batch_size, num_capsule, dim_capsule)
        s = K.batch_dot(c, u_hat, axes=[2, 2])
        squashed_s = squash(s)
        if i < self.num_routing - 1:
            # (batch_size, num_capsule, input_num_capsule)
            b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
    return squashed_s
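Initializing the routing logits b with K.zeros is deliberate: the softmax of an all-zero row is uniform, so every capsule starts with equal coupling before routing refines it. A quick NumPy check of that fact:

import numpy as np

b = np.zeros(4)                     # zero routing logits
c = np.exp(b) / np.exp(b).sum()     # softmax
print(c)                            # [0.25 0.25 0.25 0.25]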
Example #10
Source File: embedding.py From onto-lstm with Apache License 2.0
def build(self, input_shape):
    # input shape is (batch_size, num_words, num_senses, num_hyps)
    self.num_senses = input_shape[-2]
    self.num_hyps = input_shape[-1] - 1  # -1 because the last value is a word index
    # embedding of size 1.
    if self.set_sense_priors:
        self.sense_priors = self._get_initial_sense_priors(
            (self.word_index_size, 1), name='{}_sense_priors'.format(self.name))
    else:
        # OntoLSTM makes sense probabilities uniform if the passed sense parameters are zero.
        self.sense_priors = K.zeros((self.word_index_size, 1))  # uniform sense probs
    # Keeping aside the initial weights to not let Embedding set them.
    # It wouldn't know what sense priors are.
    if self.initial_weights is not None:
        self.onto_aware_embedding_weights = self.initial_weights
        self.initial_weights = None
    # The following method will set self.trainable_weights
    super(OntoAwareEmbedding, self).build(input_shape)  # input_shape will not be used by Embedding's build.
    if not self.tune_embedding:
        # Move embedding to non_trainable_weights
        self._non_trainable_weights.append(self._trainable_weights.pop())

    if self.set_sense_priors:
        self._trainable_weights.append(self.sense_priors)

    if self.onto_aware_embedding_weights is not None:
        self.set_weights(self.onto_aware_embedding_weights)
Example #11
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, a complete '
                         'input_shape must be provided '
                         '(including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.units)))
    else:
        self.states = [K.zeros((input_shape[0], self.units))]
Example #12
Source File: rnnlayer.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, a complete '
                         'input_shape must be provided '
                         '(including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.units)))
    else:
        self.states = [K.zeros((input_shape[0], self.units))]
Example #13
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def build(self, input_shape):
    assert len(input_shape) >= 3
    self.input_spec = [InputSpec(shape=input_shape)]

    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True

    super(AttentionLSTMWrapper, self).build()

    if hasattr(self.attention_vec, '_keras_shape'):
        attention_dim = self.attention_vec._keras_shape[1]
    else:
        raise Exception('Layer could not be built: no information about expected input shape.')

    self.U_a = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim),
                                     name='{}_U_a'.format(self.name))
    self.b_a = K.zeros((self.layer.output_dim,), name='{}_b_a'.format(self.name))

    self.U_m = self.layer.inner_init((attention_dim, self.layer.output_dim),
                                     name='{}_U_m'.format(self.name))
    self.b_m = K.zeros((self.layer.output_dim,), name='{}_b_m'.format(self.name))

    if self.single_attention_param:
        self.U_s = self.layer.inner_init((self.layer.output_dim, 1),
                                         name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
    else:
        self.U_s = self.layer.inner_init((self.layer.output_dim, self.layer.output_dim),
                                         name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((self.layer.output_dim,), name='{}_b_s'.format(self.name))

    self.trainable_weights = [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]
Example #14
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def call(self, x, mask=None):
    # input shape: (nb_samples, time (padded with zeros), input_dim)
    # note that the .build() method of subclasses MUST define
    # self.input_spec with a complete input shape.
    input_shape = self.input_spec[0].shape
    if K._BACKEND == 'tensorflow':
        if not input_shape[1]:
            raise Exception('When using TensorFlow, you should define '
                            'explicitly the number of timesteps of '
                            'your sequences.\n'
                            'If your first layer is an Embedding, '
                            'make sure to pass it an "input_length" '
                            'argument. Otherwise, make sure '
                            'the first layer has '
                            'an "input_shape" or "batch_input_shape" '
                            'argument, including the time axis. '
                            'Found input shape at layer ' + self.name +
                            ': ' + str(input_shape))

    if self.layer.stateful:
        initial_states = self.layer.states
    else:
        initial_states = self.layer.get_initial_states(x)

    constants = self.get_constants(x)
    preprocessed_input = self.layer.preprocess_input(x)

    last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                         initial_states,
                                         go_backwards=self.layer.go_backwards,
                                         mask=mask,
                                         constants=constants,
                                         unroll=self.layer.unroll,
                                         input_length=input_shape[1])
    if self.layer.stateful:
        self.updates = []
        for i in range(len(states)):
            self.updates.append((self.layer.states[i], states[i]))

    if self.layer.return_sequences:
        return outputs
    else:
        return last_output
Example #15
Source File: custom.py From DLWP with MIT License
def __init__(self, loss_function, lats, data_format='channels_last', weighting='cosine'):
    """
    Initialize a weighted loss.

    :param loss_function: method: Keras loss function to apply after the weighting
    :param lats: ndarray: 1-dimensional array of latitude coordinates
    :param data_format: Keras data_format ('channels_first' or 'channels_last')
    :param weighting: str: type of weighting to apply. Options are:
        cosine: weight by the cosine of the latitude (default)
        midlatitude: weight by the cosine of the latitude but also apply a 25%
            reduction to the equator and boost to the mid-latitudes
    """
    self.loss_function = loss_function
    self.lats = lats
    self.data_format = K.normalize_data_format(data_format)
    if weighting not in ['cosine', 'midlatitude']:
        raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
    self.weighting = weighting
    lat_tensor = K.zeros(lats.shape)
    print(lats)
    lat_tensor.assign(K.cast_to_floatx(lats[:]))

    self.weights = K.cos(lat_tensor * np.pi / 180.)
    if self.weighting == 'midlatitude':
        self.weights = self.weights - 0.25 * K.sin(lat_tensor * 2 * np.pi / 180.)

    self.is_init = False
    self.__name__ = 'latitude_weighted_loss'
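For intuition, the cosine weighting computed above from the K.zeros-backed latitude tensor can be checked in plain NumPy (latitudes invented):

import numpy as np

lats = np.array([-60., 0., 60.])    # degrees
w = np.cos(np.deg2rad(lats))
print(w)                            # [0.5 1.  0.5]: poleward cells weigh less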
Example #16
Source File: layers.py From recurrent-attention-for-QA-SQUAD-based-on-keras with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, a complete '
                         'input_shape must be provided '
                         '(including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
Example #17
Source File: metrics.py From keras-metrics with MIT License
def __init__(self, name="average_recall", labels=1, **kwargs):
    super(average_recall, self).__init__(name=name, **kwargs)
    self.labels = labels
    self.tp = K.zeros(labels, dtype="int32")
    self.fn = K.zeros(labels, dtype="int32")
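These K.zeros counters persist between batches, which is what makes the metric stateful. A hedged sketch of reading and bumping such a counter (assumes a TensorFlow 1.x-style backend; in-graph metric code would use K.update_add rather than set_value):

from keras import backend as K
import numpy as np

tp = K.zeros(3, dtype="int32")      # one true-positive count per label
K.set_value(tp, K.get_value(tp) + np.array([1, 0, 2], dtype="int32"))
print(K.get_value(tp))              # [1 0 2]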
Example #18
Source File: train.py From landmark-recognition-challenge with GNU General Public License v3.0
def zero_loss(y_true, y_pred):
    return K.zeros(shape=(1,))
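A constant-zero loss like this is typically attached to an output that should not drive training, e.g. one whose real loss is added elsewhere. A hedged usage sketch with an invented two-output model (layer names 'main' and 'aux' are illustrative only):

from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense

def zero_loss(y_true, y_pred):
    return K.zeros(shape=(1,))

inp = Input(shape=(8,))
main = Dense(1, name='main')(inp)
aux = Dense(4, name='aux')(inp)
model = Model(inp, [main, aux])
# 'aux' contributes nothing to the total loss during training.
model.compile(optimizer='adam', loss={'main': 'mse', 'aux': zero_loss})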
Example #19
Source File: hierarchical_softmax.py From nli_generation with MIT License
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
    input_dim = self.input_spec[0].shape[-1]

    self.W1 = self.init((input_dim, self.n_classes),
                        name='{}_W1'.format(self.name))
    self.b1 = K.zeros((self.n_classes,), name='{}_b1'.format(self.name))
    self.W2 = self.init((self.n_classes, input_dim, self.n_outputs_per_class),
                        name='{}_W2'.format(self.name))
    self.b2 = K.zeros((self.n_classes, self.n_outputs_per_class),
                      name='{}_b2'.format(self.name))

    self.trainable_weights = [self.W1, self.b1, self.W2, self.b2]
Example #20
Source File: train.py From landmark-recognition-challenge with GNU General Public License v3.0
def reset_accuracy(self, group=-1, save=False):
    self.accuracy_reached = False
    self.last_accuracies = np.zeros(AccuracyReset.N_BATCHES)
    self.last_accuracies_i = 0
    if group != -1 and save:
        self.model.save(self.filepath.format(group=group, epoch=self.epoch + 1),
                        overwrite=True)
    return

# Callback to monitor accuracy on a per-batch basis
Example #21
Source File: temporal_mean_rate_theano.py From snn_toolbox with MIT License
def reset(self, sample_idx):
    """Reset layer variables."""

    self.reset_spikevars(sample_idx)
    mod = self.config.getint('simulation', 'reset_between_nth_sample')
    mod = mod if mod else sample_idx + 1
    if sample_idx % mod == 0:
        self.spikerate_pre.set_value(np.zeros(self.input_shape, k.floatx()))
Example #22
Source File: temporal_mean_rate_theano.py From snn_toolbox with MIT License
def build(self, input_shape):
    """Creates the layer neurons and connections.

    Parameters
    ----------

    input_shape: Union[list, tuple, Any]
        Keras tensor (future input to layer) or list/tuple of Keras tensors
        to reference for weight shape computations.
    """

    MaxPooling2D.build(self, input_shape)
    self.init_neurons(input_shape)
    self.spikerate_pre = k.variable(np.zeros(input_shape))
    self.previous_x = k.variable(np.zeros(input_shape))
Example #23
Source File: temporal_mean_rate_theano.py From snn_toolbox with MIT License
def init_neurons(self, input_shape):
    """Init layer neurons."""

    from snntoolbox.bin.utils import get_log_keys, get_plot_keys

    output_shape = self.compute_output_shape(input_shape)
    self.v_thresh = k.variable(self._v_thresh)
    self.mem = k.variable(self.init_membrane_potential(output_shape))
    self.time = k.variable(self.dt)
    # To save memory and computations, allocate only where needed:
    if self.tau_refrac > 0:
        self.refrac_until = k.zeros(output_shape)
    if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
            'hist_spikerates_activations', 'operations',
            'synaptic_operations_b_t', 'neuron_operations_b_t',
            'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
                                      get_log_keys(self.config))):
        self.spiketrain = k.zeros(output_shape)
    if self.online_normalization:
        self.spikecounts = k.zeros(output_shape)
        self.max_spikerate = k.variable(0)
    if self.payloads:
        self.payloads = k.zeros(output_shape)
        self.payloads_sum = k.zeros(output_shape)
    if clamp_var:
        self.spikerate = k.zeros(input_shape)
        self.var = k.zeros(input_shape)
    if hasattr(self, 'clamp_idx'):
        self.clamp_idx = self.get_clamp_idx()
Example #24
Source File: temporal_mean_rate_theano.py From snn_toolbox with MIT License
def reset_spikevars(self, sample_idx):
    """
    Reset variables present in spiking layers. Can be turned off for instance
    when a video sequence is tested.
    """

    mod = self.config.getint('simulation', 'reset_between_nth_sample')
    mod = mod if mod else sample_idx + 1
    do_reset = sample_idx % mod == 0
    if do_reset:
        self.mem.set_value(self.init_membrane_potential())
    self.time.set_value(np.float32(self.dt))
    if self.tau_refrac > 0:
        self.refrac_until.set_value(np.zeros(self.output_shape, k.floatx()))
    if self.spiketrain is not None:
        self.spiketrain.set_value(np.zeros(self.output_shape, k.floatx()))
    if self.payloads:
        self.payloads.set_value(np.zeros(self.output_shape, k.floatx()))
        self.payloads_sum.set_value(np.zeros(self.output_shape, k.floatx()))
    if self.online_normalization and do_reset:
        self.spikecounts.set_value(np.zeros(self.output_shape, k.floatx()))
        self.max_spikerate.set_value(np.float32(0.))
        self.v_thresh.set_value(np.float32(self._v_thresh))
    if clamp_var and do_reset:
        self.spikerate.set_value(np.zeros(self.input_shape, k.floatx()))
        self.var.set_value(np.zeros(self.input_shape, k.floatx()))
Example #25
Source File: layers.py From keras-gp with MIT License
def build(self, input_shape):
    """Create the internal variables for communication with GP backend.

    Arguments:
    ----------
        input_shape: Keras tensor (future input to layer)
            or list/tuple of Keras tensors to reference
            for weight shape computations.
    """
    assert len(input_shape) == 2
    input_dim = input_shape[-1]

    self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))

    # Configure GP backend
    self.backend.configure(input_dim, self.hyp, **self.backend_config)

    # Internal shared variables
    self._dlik_dh = K.zeros((self.nb_train_samples, input_dim))
    self._batch_ids = K.variable(np.zeros(self.batch_size), dtype='int32')
    self._batch_sz = K.variable(self.batch_size, dtype='int32')

    # Internal metrics
    self._nlml = K.variable(0.)
    self._mse = K.variable(0.)

    self.built = True
Example #26
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    if self.stateful:
        self.reset_states()
    else:
        # initial states: all-zero tensor of shape (output_dim)
        self.states = [None]
    input_dim = input_shape[2]
    self.input_dim = input_dim

    self.V = self.init((self.output_dim, input_dim - self.control_dim),
                       name='{}_V'.format(self.name))
    self.W = self.init((input_dim, self.output_dim),
                       name='{}_W'.format(self.name))
    self.U = self.inner_init((self.output_dim, self.output_dim),
                             name='{}_U'.format(self.name))
    self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
    self.ext_b = K.zeros((input_dim - self.control_dim,),
                         name='{}_ext_b'.format(self.name))

    self.regularizers = []
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    if self.U_regularizer:
        self.U_regularizer.set_param(self.U)
        self.regularizers.append(self.U_regularizer)
    if self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)

    self.trainable_weights = [self.W, self.U, self.b, self.V, self.ext_b]

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
Example #27
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
Example #28
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    if self.stateful:
        self.reset_states()
    else:
        # initial states: all-zero tensor of shape (output_dim)
        self.states = [None]
    input_dim = input_shape[2]
    self.input_dim = input_dim

    self.V = self.init((self.output_dim, input_dim),
                       name='{}_V'.format(self.name))
    self.W = self.init((input_dim, self.output_dim),
                       name='{}_W'.format(self.name))
    self.U = self.inner_init((self.output_dim, self.output_dim),
                             name='{}_U'.format(self.name))
    self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))
    self.ext_b = K.zeros((input_dim,), name='{}_ext_b'.format(self.name))

    self.regularizers = []
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    if self.U_regularizer:
        self.U_regularizer.set_param(self.U)
        self.regularizers.append(self.U_regularizer)
    if self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)

    self.trainable_weights = [self.W, self.U, self.b, self.V, self.ext_b]

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
Example #29
Source File: layers.py From research with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
    if self.dim_ordering == 'th':
        stack_size = input_shape[1]
        self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
    elif self.dim_ordering == 'tf':
        stack_size = input_shape[3]
        self.W_shape = (self.nb_row, self.nb_col, self.nb_filter, stack_size)
    else:
        raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
    self.W = self.init(self.W_shape, name='{}/w'.format(self.name))
    self.b = K.zeros((self.nb_filter,), name='{}/biases'.format(self.name))
    self.trainable_weights = [self.W, self.b]

    self.regularizers = []
    if self.W_regularizer:
        self.W_regularizer.set_param(self.W)
        self.regularizers.append(self.W_regularizer)
    if self.b_regularizer:
        self.b_regularizer.set_param(self.b)
        self.regularizers.append(self.b_regularizer)
    if self.activity_regularizer:
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)

    self.constraints = {}
    if self.W_constraint:
        self.constraints[self.W] = self.W_constraint
    if self.b_constraint:
        self.constraints[self.b] = self.b_constraint

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
Example #30
Source File: attentive_convlstm.py From sam with MIT License
def get_initial_states(self, x):
    initial_state = K.sum(x, axis=1)
    # Convolving with an all-zero 1x1 kernel is a shape trick: it yields an
    # all-zero tensor with nb_filters_out channels, i.e. a correctly shaped
    # zero initial state regardless of the input's channel count.
    initial_state = K.conv2d(initial_state,
                             K.zeros((self.nb_filters_out, self.nb_filters_in, 1, 1)),
                             border_mode='same')
    initial_states = [initial_state for _ in range(len(self.states))]

    return initial_states