Python keras.backend.batch_set_value() Examples
The following are 7 code examples of keras.backend.batch_set_value(). The original project and source file are noted above each example. You may also want to check out all available functions and classes of the keras.backend module.
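Before the project examples, here is a minimal sketch of the function itself, assuming a Keras 2.x installation with the TensorFlow backend (the variable names are illustrative and not taken from any example below). batch_set_value() takes a list of (variable, value) tuples and assigns them all in a single backend call, which is faster than repeated K.set_value() calls:

import numpy as np
from keras import backend as K

# Create two backend variables initialised to zero.
w = K.variable(np.zeros((2, 2)))
b = K.variable(np.zeros((2,)))

# Pair each variable with its new value and assign everything
# in one backend call.
K.batch_set_value([(w, np.ones((2, 2))),
                   (b, np.full((2,), 0.5))])

print(K.eval(w))  # [[1. 1.] [1. 1.]]
print(K.eval(b))  # [0.5 0.5]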
Example #1
Source File: common.py From keras-fcn with MIT License | 5 votes
def load_weights(model, weights_path):
    """Load weights from Caffe models."""
    print("Loading weights...")
    if h5py is None:
        raise ImportError('`load_weights` requires h5py.')
    f = h5py.File(weights_path, mode='r')
    # New file format.
    layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]

    # Reverse index of layer name to list of layers with name.
    index = {}
    for layer in model.layers:
        if layer.name:
            index.setdefault(layer.name, []).append(layer)

    # We batch weight value assignments in a single backend call
    # which provides a speedup in TensorFlow.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
        weight_values = [g[weight_name] for weight_name in weight_names]
        for layer in index.get(name, []):
            symbolic_weights = layer.weights
            # Set values.
            for i in range(len(weight_values)):
                weight_value_tuples.append((symbolic_weights[i], weight_values[i]))
    K.batch_set_value(weight_value_tuples)
    return layer_names
Example #2
Source File: server.py From recipe-summarization with MIT License | 5 votes
def load_weights(model, filepath):
    """Load all weights possible into model from filepath.

    This is a modified version of keras load_weights that loads as much as it
    can if there is a mismatch between file and model. It returns the weights
    of the first layer in which the mismatch has happened.
    """
    print('Loading', filepath, 'to', model.name)
    with h5py.File(filepath, mode='r') as f:
        # new file format
        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]

        # we batch weight value assignments in a single backend call
        # which provides a speedup in TensorFlow.
        weight_value_tuples = []
        for name in layer_names:
            print(name)
            g = f[name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            if len(weight_names):
                weight_values = [g[weight_name] for weight_name in weight_names]
                try:
                    layer = model.get_layer(name=name)
                except:
                    layer = None
                if not layer:
                    print('failed to find layer', name, 'in model')
                    print('weights', ' '.join(str_shape(w) for w in weight_values))
                    print('stopping to load all other layers')
                    weight_values = [np.array(w) for w in weight_values]
                    break
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                weight_value_tuples += zip(symbolic_weights, weight_values)
                weight_values = None
        K.batch_set_value(weight_value_tuples)
    return weight_values
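A hypothetical call site for the loader above (the checkpoint path and the model variable are illustrative; str_shape is a small shape-formatting helper defined elsewhere in that project):

leftover = load_weights(model, 'checkpoint.h5')
if leftover is not None:
    # The file and model diverged: 'leftover' holds the saved weights of the
    # first mismatched layer, so the caller can inspect or reuse them.
    print('stopped early; first unloaded layer has', len(leftover), 'arrays')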
Example #3
Source File: test_topology.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes
def test_load_layers():
    from keras.layers import ConvLSTM2D, TimeDistributed, Bidirectional, Conv2D, Input
    from keras.models import Model

    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        inputs = Input(shape=(10, 20, 20, 1))
    else:
        inputs = Input(shape=(10, 1, 20, 20))
    td_conv = TimeDistributed(Conv2D(15, (5, 5)))(inputs)
    bi_convlstm2d = Bidirectional(ConvLSTM2D(10, (3, 3)), merge_mode='concat')(td_conv)
    model = Model(inputs=inputs, outputs=bi_convlstm2d)

    weight_value_tuples = []

    # TimeDistributed Conv2D layer
    # use 'channels_first' data format to check that the function
    # is being called correctly for Conv2D
    # old: (filters, stack_size, kernel_rows, kernel_cols)
    # new: (kernel_rows, kernel_cols, stack_size, filters)
    weight_tensor_td_conv_old = list()
    weight_tensor_td_conv_old.append(np.zeros((15, 1, 5, 5)))
    weight_tensor_td_conv_old.append(np.zeros((15,)))
    td_conv_layer = model.layers[1]
    td_conv_layer.layer.data_format = 'channels_first'
    weight_tensor_td_conv_new = topology.preprocess_weights_for_loading(
        td_conv_layer,
        weight_tensor_td_conv_old,
        original_keras_version='1')
    symbolic_weights = td_conv_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_td_conv_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_td_conv_new)

    # Bidirectional ConvLSTM2D layer
    # old ConvLSTM2D took a list of 12 weight tensors,
    # returns a list of 3 concatenated larger tensors.
    weight_tensor_bi_convlstm_old = []
    for j in range(2):  # bidirectional
        for i in range(4):
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 15, 10)))  # kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((3, 3, 10, 10)))  # recurrent kernel
            weight_tensor_bi_convlstm_old.append(np.zeros((10,)))  # bias
    bi_convlstm_layer = model.layers[2]
    weight_tensor_bi_convlstm_new = topology.preprocess_weights_for_loading(
        bi_convlstm_layer,
        weight_tensor_bi_convlstm_old,
        original_keras_version='1')
    symbolic_weights = bi_convlstm_layer.weights
    assert (len(symbolic_weights) == len(weight_tensor_bi_convlstm_new))
    weight_value_tuples += zip(symbolic_weights, weight_tensor_bi_convlstm_new)

    K.batch_set_value(weight_value_tuples)

    assert np.all(K.eval(model.layers[1].weights[0]) == weight_tensor_td_conv_new[0])
    assert np.all(K.eval(model.layers[1].weights[1]) == weight_tensor_td_conv_new[1])
    assert np.all(K.eval(model.layers[2].weights[0]) == weight_tensor_bi_convlstm_new[0])
    assert np.all(K.eval(model.layers[2].weights[1]) == weight_tensor_bi_convlstm_new[1])
    assert np.all(K.eval(model.layers[2].weights[2]) == weight_tensor_bi_convlstm_new[2])
    assert np.all(K.eval(model.layers[2].weights[3]) == weight_tensor_bi_convlstm_new[3])
    assert np.all(K.eval(model.layers[2].weights[4]) == weight_tensor_bi_convlstm_new[4])
    assert np.all(K.eval(model.layers[2].weights[5]) == weight_tensor_bi_convlstm_new[5])
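The TimeDistributed Conv2D branch of this test exercises the Keras 1 to Keras 2 kernel layout conversion described in the comments above. A minimal sketch of what that conversion amounts to (an axis permutation; this is an illustration, not Keras's preprocess_weights_for_loading itself):

import numpy as np

old_kernel = np.zeros((15, 1, 5, 5))                 # (filters, stack_size, rows, cols)
new_kernel = np.transpose(old_kernel, (2, 3, 1, 0))  # reorder the axes
assert new_kernel.shape == (5, 5, 1, 15)             # (rows, cols, stack_size, filters)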
Example #4
Source File: models.py From GroundedTranslation with BSD 3-Clause "New" or "Revised" License | 4 votes
def partial_load_weights(self, model, f):
    '''
    Keras does not seem to support partially loading weights from one
    model into another model. This function achieves the same purpose
    so we can serialise the final RNN hidden state to disk.

    TODO: find / engineer a more elegant and general approach
    '''
    flattened_layers = model.layers
    # new file format
    filtered_layers = []
    for layer in flattened_layers:
        weights = layer.weights
        if weights:
            filtered_layers.append(layer)
    flattened_layers = filtered_layers

    layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
    filtered_layer_names = []
    for name in layer_names[:-1]:  # -1 so we clip out the output layer
        g = f[name]
        weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
        if len(weight_names):
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(flattened_layers):
        raise Exception('You are trying to load a weight file '
                        'containing ' + str(len(layer_names)) +
                        ' layers into a model with ' +
                        str(len(flattened_layers)) + ' layers.')

    # we batch weight value assignments in a single backend call
    # which provides a speedup in TensorFlow.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
        weight_values = [g[weight_name] for weight_name in weight_names]
        layer = flattened_layers[k]
        symbolic_weights = layer.weights
        if len(weight_values) != len(symbolic_weights):
            raise Exception('Layer #' + str(k) +
                            ' (named "' + layer.name +
                            '" in the current model) was found to '
                            'correspond to layer ' + name +
                            ' in the save file. '
                            'However the new layer ' + layer.name +
                            ' expects ' + str(len(symbolic_weights)) +
                            ' weights, but the saved weights have ' +
                            str(len(weight_values)) + ' elements.')
        weight_value_tuples += zip(symbolic_weights, weight_values)
    K.batch_set_value(weight_value_tuples)
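A hypothetical call site (the path and the instance name decoder are illustrative; the method takes an already-open h5py file object rather than a path, so the caller opens and closes the file):

import h5py

with h5py.File('checkpoint.h5', mode='r') as f:
    decoder.partial_load_weights(model, f)  # decoder: instance of the class above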
Example #5
Source File: keras.py From aetros-cli with MIT License | 4 votes
def load_weights(model, weights_path):
    from keras import backend as K

    if not os.path.isfile(weights_path):
        raise Exception("File does not exist.")

    import h5py
    f = h5py.File(weights_path, mode='r')

    # new file format
    layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]

    if len(layer_names) != len(model.layers):
        print("Warning: Layer count different")

    # we batch weight value assignments in a single backend call
    # which provides a speedup in TensorFlow.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
        layer = model.get_layer(name=name)
        if layer and len(weight_names):
            weight_values = [g[weight_name] for weight_name in weight_names]
            if not hasattr(layer, 'trainable_weights'):
                print("Layer %s (%s) has no trainable weights, but we tried to load it." % (
                    name, type(layer).__name__))
            else:
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                if len(weight_values) != len(symbolic_weights):
                    raise Exception('Layer #' + str(k) +
                                    ' (named "' + layer.name +
                                    '" in the current model) was found to '
                                    'correspond to layer ' + name +
                                    ' in the save file. '
                                    'However the new layer ' + layer.name +
                                    ' expects ' + str(len(symbolic_weights)) +
                                    ' weights, but the saved weights have ' +
                                    str(len(weight_values)) + ' elements.')
                weight_value_tuples += list(zip(symbolic_weights, weight_values))

    K.batch_set_value(weight_value_tuples)
    f.close()
Example #6
Source File: model_store.py From imgclsmob with MIT License | 4 votes
def _load_weights_from_hdf5_group(f, layers):
    """
    Implements topological (order-based) weight loading.

    Parameters
    ----------
    f : File
        A pointer to a HDF5 group.
    layers : list of Layer
        List of target layers.
    """
    filtered_layers = []
    for layer in layers:
        weights = layer.weights
        if weights:
            filtered_layers.append(layer)

    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError("You are trying to load a weight file "
                         "containing " + str(len(layer_names)) +
                         " layers into a model with " +
                         str(len(filtered_layers)) + " layers.")

    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
        layer = filtered_layers[k]
        symbolic_weights = layer.weights
        weight_values = _preprocess_weights_for_loading(
            layer=layer,
            weights=weight_values)
        if len(weight_values) != len(symbolic_weights):
            raise ValueError("Layer #" + str(k) +
                             " (named `" + layer.name +
                             "` in the current model) was found to "
                             "correspond to layer " + name +
                             " in the save file. "
                             "However the new layer " + layer.name +
                             " expects " + str(len(symbolic_weights)) +
                             " weights, but the saved weights have " +
                             str(len(weight_values)) + " elements.")
        weight_value_tuples += zip(symbolic_weights, weight_values)
    K.batch_set_value(weight_value_tuples)
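A hypothetical invocation of the topological loader above (the file name and the model variable are illustrative). Note that files written by Keras's save_model() typically nest the weights under a 'model_weights' group, while save_weights() writes them at the root:

import h5py

with h5py.File('model.h5', mode='r') as f:
    g = f['model_weights'] if 'model_weights' in f else f
    _load_weights_from_hdf5_group(g, model.layers)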
Example #7
Source File: model_store.py From imgclsmob with MIT License | 4 votes
def _load_weights_from_hdf5_group_by_name(f, layers):
    """
    Implements name-based weight loading.

    Parameters
    ----------
    f : File
        A pointer to a HDF5 group.
    layers : list of Layer
        List of target layers.
    """
    # New file format.
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")

    # Reverse index of layer name to list of layers with name.
    index = {}
    for layer in layers:
        if layer.name:
            index.setdefault(layer.name, []).append(layer)

    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
        for layer in index.get(name, []):
            symbolic_weights = layer.weights
            weight_values = _preprocess_weights_for_loading(
                layer=layer,
                weights=weight_values)
            if len(weight_values) != len(symbolic_weights):
                warnings.warn("Skipping loading of weights for layer {} due to mismatch"
                              " in number of weights ({} vs {}).".format(
                                  layer, len(symbolic_weights), len(weight_values)))
                continue
            # Set values.
            for i in range(len(weight_values)):
                symbolic_shape = K.int_shape(symbolic_weights[i])
                if symbolic_shape != weight_values[i].shape:
                    warnings.warn("Skipping loading of weights for layer {} due to mismatch"
                                  " in shape ({} vs {}).".format(
                                      layer, symbolic_weights[i].shape, weight_values[i].shape))
                    continue
                else:
                    weight_value_tuples.append((symbolic_weights[i], weight_values[i]))
    K.batch_set_value(weight_value_tuples)