Python keras.utils.multi_gpu_model() Examples
The following are 18 code examples of keras.utils.multi_gpu_model(). The source file and project are noted above each example. You may also want to check out all available functions and classes of the keras.utils module.
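For orientation, here is a minimal usage sketch. It is an illustration, not taken from the projects below, and assumes Keras 2.x with the TensorFlow backend and at least two visible GPUs (later releases removed multi_gpu_model in favor of tf.distribute.MirroredStrategy):

import numpy as np
import keras
from keras.utils import multi_gpu_model

# Define the single-device ("template") model.
model = keras.models.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    keras.layers.Dense(1)
])

# Replicate onto 2 GPUs; each batch is split into 2 sub-batches,
# processed in parallel, and the results are merged on the CPU.
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.compile(loss='mse', optimizer='rmsprop')

x = np.random.random((256, 32))
y = np.random.random((256, 1))
parallel_model.fit(x, y, epochs=1, batch_size=64)

# Save via the template model (it shares weights with the
# parallel one), not via the parallel wrapper.
model.save('model.h5')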
Example #1
Source File: multi_gpu_test.py From DeepLearning_Wavelet-LSTM with MIT License

def multi_gpu_test_simple_model():
    print('####### test simple model')
    num_samples = 1000
    input_dim = 10
    output_dim = 1
    hidden_dim = 10
    gpus = 8
    target_gpu_id = [0, 2, 4]
    epochs = 2
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(hidden_dim, input_shape=(input_dim,)))
    model.add(keras.layers.Dense(output_dim))

    x = np.random.random((num_samples, input_dim))
    y = np.random.random((num_samples, output_dim))

    parallel_model = multi_gpu_model(model, gpus=gpus)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit(x, y, epochs=epochs)

    parallel_model = multi_gpu_model(model, gpus=target_gpu_id)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit(x, y, epochs=epochs)
Example #2
Source File: utils.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0

def load_model(model_path):
    custom_layers = {
        "multihead_attention": multihead_attention,
        "Conv2D": L.Conv2D,
        "split_heads_2d": split_heads_2d,
        "local_attention_2d": local_attention_2d,
        "combine_heads_2d": combine_heads_2d
    }
    model = model_from_yaml(
        open(os.path.join(model_path, "arch.yaml")).read(),
        custom_objects=custom_layers
    )

    full_path = os.path.join(model_path, "weights.h5")
    with h5py.File(full_path, "r") as w:
        keys = list(w.keys())
        is_para = any(["model" in k for k in keys])

    if is_para:
        para_model = multi_gpu_model(model, gpus=2)
        para_model.load_weights(full_path)
        model = para_model.layers[-2]
    else:
        model.load_weights(full_path)

    print("Model " + model_path + " loaded")
    return model
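Note the recovery trick: weights checkpointed from a multi_gpu_model belong to the parallel wrapper, so the loader rebuilds a two-GPU wrapper, loads the weights into it, and pulls the serial model back out of its layers (assumed here to sit at layers[-2], just before the merge layer).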
Example #3
Source File: neural_network.py From kits19.MIScnn with GNU General Public License v3.0

def __init__(self, config):
    # Initialize model
    model = Unet(input_shape=config["input_shape"],
                 n_labels=config["classes"],
                 activation="sigmoid")
    # Transform to Keras multi-GPU model
    if config["gpu_number"] > 1:
        model = multi_gpu_model(model, config["gpu_number"])
    # Compile model (the "learninig_rate" key is spelled as in the source config)
    model.compile(optimizer=Adam(lr=config["learninig_rate"]),
                  loss=tversky_loss,
                  metrics=self.metrics)
    self.model = model
    self.config = config
Example #4
Source File: _base.py From faceswap with GNU General Public License v3.0

def build(self):
    """ Build the model. Override for custom build methods """
    self.add_networks()
    self.load_models(swapped=False)
    inputs = self.get_inputs()
    try:
        self.build_autoencoders(inputs)
    except ValueError as err:
        if "must be from the same graph" in str(err).lower():
            msg = ("There was an error loading saved weights. This is most likely due to "
                   "model corruption during a previous save."
                   "\nYou should restore weights from a snapshot or from backup files. "
                   "You can use the 'Restore' Tool to restore from backup.")
            raise FaceswapError(msg) from err
        if "multi_gpu_model" in str(err).lower():
            raise FaceswapError(str(err)) from err
        raise err
    self.log_summary()
    self.compile_predictors(initialize=True)
Example #5
Source File: gpu_utils.py From talos with MIT License

def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):
    '''Takes as input the model, and returns a model
    based on the number of GPUs available on the machine
    or alternatively the 'gpus' user input.

    NOTE: this needs to be used before model.compile() in the
    model inputted to Scan in the form:

    from talos.utils.gpu_utils import multi_gpu
    model = multi_gpu(model)
    '''
    from keras.utils import multi_gpu_model

    return multi_gpu_model(model,
                           gpus=gpus,
                           cpu_merge=cpu_merge,
                           cpu_relocation=cpu_relocation)
Example #6
Source File: core.py From bi-lstm-crf with Apache License 2.0

def __build_model(self, emb_matrix=None):
    word_input = Input(shape=(None,), dtype='int32', name="word_input")

    word_emb = Embedding(self.vocab_size + 1, self.embed_dim,
                         weights=[emb_matrix] if emb_matrix is not None else None,
                         trainable=True if emb_matrix is None else False,
                         name='word_emb')(word_input)

    bilstm_output = Bidirectional(LSTM(self.bi_lstm_units // 2,
                                       return_sequences=True))(word_emb)
    bilstm_output = Dropout(self.dropout_rate)(bilstm_output)

    output = Dense(self.chunk_size + 1, kernel_initializer="he_normal")(bilstm_output)
    output = CRF(self.chunk_size + 1, sparse_target=self.sparse_target)(output)

    model = Model([word_input], [output])

    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)

    parallel_model.compile(optimizer=self.optimizer, loss=crf_loss, metrics=[crf_accuracy])

    return model, parallel_model
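Note the return convention: both the serial model and the parallel wrapper are returned, because training runs on the compiled parallel model while saving and single-device inference typically go through the underlying serial model.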
Example #7
Source File: multivariate_example.py From interp-net with MIT License

def interp_net():
    if gpu_num > 1:
        dev = "/cpu:0"
    else:
        dev = "/gpu:0"
    with tf.device(dev):
        main_input = Input(shape=(4 * num_features, timestamp), name='input')
        sci = single_channel_interp(ref_points, hours_look_ahead)
        cci = cross_channel_interp()
        interp = cci(sci(main_input))
        reconst = cci(sci(main_input, reconstruction=True),
                      reconstruction=True)
        aux_output = Lambda(lambda x: x, name='aux_output')(reconst)
        z = Permute((2, 1))(interp)
        z = GRU(hid, activation='tanh', recurrent_dropout=0.2, dropout=0.2)(z)
        main_output = Dense(1, activation='sigmoid', name='main_output')(z)
        orig_model = Model([main_input], [main_output, aux_output])
    if gpu_num > 1:
        model = multi_gpu_model(orig_model, gpus=gpu_num)
    else:
        model = orig_model
    print(orig_model.summary())
    return model
Example #8
Source File: models.py From DLWP with MIT License

def build_model(self, model, gpus=1, **compile_kwargs):
    """
    Compile a Keras Functional model.

    :param model: keras.models.Model: Keras functional model
    :param gpus: int: number of GPU units on which to parallelize the Keras model
    :param compile_kwargs: kwargs passed to the 'compile' method of the Keras model
    """
    # Test the parameters
    if type(gpus) is not int:
        raise TypeError("'gpus' argument must be an int")

    # Self-explanatory
    util.make_keras_picklable()

    # Build a model, either on a single GPU or on a CPU to control multiple GPUs
    self.base_model = model
    self._n_steps = len(model.outputs)
    if gpus > 1:
        import tensorflow as tf
        with tf.device('/cpu:0'):
            self.base_model = keras.models.clone_model(self.base_model)
        self.model = multi_gpu_model(self.base_model, gpus=gpus)
        self.gpus = gpus
    else:
        self.model = self.base_model
    self.model.compile(**compile_kwargs)
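The tf.device('/cpu:0') scope is the part worth copying: cloning the base model on the CPU keeps its weights host-resident, so every GPU replica pulls from and merges into a single copy instead of one GPU serving the others; this is the same effect multi_gpu_model's cpu_relocation=True provides.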
Example #9
Source File: option.py From Looking-to-Listen-at-the-Cocktail-Party with MIT License

def __init__(self, ser_model, gpus):
    pmodel = multi_gpu_model(ser_model, gpus)
    self.__dict__.update(pmodel.__dict__)
    self._smodel = ser_model
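This __init__ comes from the widely copied ModelMGPU workaround: a subclass of keras.models.Model that behaves like the parallel model but remembers the serial one. The excerpt shows only the constructor; a common companion method (a sketch under that assumption, not part of the source above) routes save/load to the serial model so checkpoint callbacks write single-GPU weights:

from keras.models import Model
from keras.utils import multi_gpu_model

class ModelMGPU(Model):
    def __init__(self, ser_model, gpus):
        pmodel = multi_gpu_model(ser_model, gpus)
        # Mimic a copy constructor: adopt the parallel model's state.
        self.__dict__.update(pmodel.__dict__)
        self._smodel = ser_model

    def __getattribute__(self, attrname):
        # Delegate save/load (e.g. calls made by ModelCheckpoint) to
        # the serial model so saved weights are single-GPU.
        if 'load' in attrname or 'save' in attrname:
            return getattr(self._smodel, attrname)
        return super(ModelMGPU, self).__getattribute__(attrname)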
Example #10
Source File: cifar10_cnn.py From Deep-Learning-Quick-Reference with MIT License

def build_network(num_gpu=1, input_shape=None):
    inputs = Input(shape=input_shape, name="input")

    # convolutional block 1
    conv1 = Conv2D(64, kernel_size=(3, 3), activation="relu", name="conv_1")(inputs)
    batch1 = BatchNormalization(name="batch_norm_1")(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name="pool_1")(batch1)

    # convolutional block 2
    conv2 = Conv2D(32, kernel_size=(3, 3), activation="relu", name="conv_2")(pool1)
    batch2 = BatchNormalization(name="batch_norm_2")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name="pool_2")(batch2)

    # fully connected layers
    flatten = Flatten()(pool2)
    fc1 = Dense(512, activation="relu", name="fc1")(flatten)
    d1 = Dropout(rate=0.2, name="dropout1")(fc1)
    fc2 = Dense(256, activation="relu", name="fc2")(d1)
    d2 = Dropout(rate=0.2, name="dropout2")(fc2)

    # output layer
    output = Dense(10, activation="softmax", name="softmax")(d2)

    # finalize and compile
    model = Model(inputs=inputs, outputs=output)
    if num_gpu > 1:
        model = multi_gpu_model(model, num_gpu)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])
    return model
Example #11
Source File: _base.py From faceswap with GNU General Public License v3.0

def add_predictor(self, side, model):
    """ Add a predictor to the predictors dictionary """
    logger.debug("Adding predictor: (side: '%s', model: %s)", side, model)
    if self.gpus > 1:
        logger.debug("Converting to multi-gpu: side %s", side)
        model = multi_gpu_model(model, self.gpus)
    self.predictors[side] = model
    if not self.state.inputs:
        self.store_input_shapes(model)
Example #12
Source File: _multigpu.py From keras_experiments with The Unlicense

def __init__(self, ser_model, gpus, *args, **kwargs):  # @IgnorePep8 pylint: disable=super-init-not-called
    pmodel = multi_gpu_model(ser_model, gpus, *args, **kwargs)
    # mimic copy constructor via __dict__ update, hence no super-init
    self.__dict__.update(pmodel.__dict__)
    self._smodel = ser_model
Example #13
Source File: multi_gpu_test.py From DeepLearning_Wavelet-LSTM with MIT License

def multi_gpu_test_multi_io_model():
    print('####### test multi-io model')
    num_samples = 1000
    input_dim_a = 10
    input_dim_b = 5
    output_dim_a = 1
    output_dim_b = 2
    hidden_dim = 10
    gpus = 8
    target_gpu_id = [0, 2, 4]
    epochs = 2

    input_a = keras.Input((input_dim_a,))
    input_b = keras.Input((input_dim_b,))

    a = keras.layers.Dense(hidden_dim)(input_a)
    b = keras.layers.Dense(hidden_dim)(input_b)

    c = keras.layers.concatenate([a, b])

    output_a = keras.layers.Dense(output_dim_a)(c)
    output_b = keras.layers.Dense(output_dim_b)(c)

    model = keras.models.Model([input_a, input_b], [output_a, output_b])

    a_x = np.random.random((num_samples, input_dim_a))
    b_x = np.random.random((num_samples, input_dim_b))
    a_y = np.random.random((num_samples, output_dim_a))
    b_y = np.random.random((num_samples, output_dim_b))

    parallel_model = multi_gpu_model(model, gpus=gpus)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)

    parallel_model = multi_gpu_model(model, gpus=target_gpu_id)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
Example #14
Source File: yolo.py From human_counter with MIT License

def generate(self):
    model_path = os.path.expanduser(self.model_path)
    assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

    # Load model, or construct model and load weights.
    num_anchors = len(self.anchors)
    num_classes = len(self.class_names)
    is_tiny_version = num_anchors == 6  # default setting
    try:
        self.yolo_model = load_model(model_path, compile=False)
    except:
        self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
            if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
        self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
    else:
        assert self.yolo_model.layers[-1].output_shape[-1] == \
            num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
            'Mismatch between model and given anchor and class sizes'

    print('{} model, anchors, and classes loaded.'.format(model_path))

    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.

    # Generate output tensor targets for filtered bounding boxes.
    self.input_image_shape = K.placeholder(shape=(2,))
    if gpu_num >= 2:
        self.yolo_model = multi_gpu_model(self.yolo_model, gpus=gpu_num)
    boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                       len(self.class_names), self.input_image_shape,
                                       score_threshold=self.score, iou_threshold=self.iou)
    return boxes, scores, classes
Example #15
Source File: multi_gpu_test.py From DeepLearning_Wavelet-LSTM with MIT License

def multi_gpu_test_invalid_devices():
    input_shape = (1000, 10)
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10,
                                 activation='relu',
                                 input_shape=input_shape[1:]))
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.compile(loss='mse', optimizer='rmsprop')

    x = np.random.random(input_shape)
    y = np.random.random((input_shape[0], 1))
    with pytest.raises(ValueError):
        parallel_model = multi_gpu_model(model, gpus=10)
        parallel_model.fit(x, y, epochs=2)

    with pytest.raises(ValueError):
        parallel_model = multi_gpu_model(model, gpus=[0, 2, 4, 6, 8])
        parallel_model.fit(x, y, epochs=2)

    with pytest.raises(ValueError):
        parallel_model = multi_gpu_model(model, gpus=1)
        parallel_model.fit(x, y, epochs=2)

    with pytest.raises(ValueError):
        parallel_model = multi_gpu_model(model, gpus=[0])
        parallel_model.fit(x, y, epochs=2)
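All four calls are expected to raise: multi_gpu_model validates the request against the devices actually visible, and an integer gpus must be at least 2 (gpus=1 and gpus=[0] fail that check, while gpus=10 and the five-device list exceed what the test machine exposes).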
Example #16
Source File: multi_gpu_model.py From bidaf-keras with GNU General Public License v3.0

def __init__(self, ser_model, gpus=None):
    pmodel = multi_gpu_model(ser_model, gpus)
    self.__dict__.update(pmodel.__dict__)
    self._smodel = ser_model
Example #17
Source File: network.py From diluvian with MIT License

def make_parallel(model, gpus=None):
    new_model = multi_gpu_model(model, gpus)
    func_type = type(model.save)

    # monkeypatch the save to save just the underlying model
    def new_save(_, *args, **kwargs):
        model.save(*args, **kwargs)
    new_model.save = func_type(new_save, new_model)

    return new_model
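A hypothetical usage of the wrapper above (model, x, and y are assumed for illustration): because save is monkeypatched, checkpoints written through the returned wrapper contain the plain single-GPU model.

parallel = make_parallel(model, gpus=2)
parallel.compile(loss='mse', optimizer='rmsprop')
parallel.fit(x, y, epochs=2)
parallel.save('model.h5')  # delegates to the wrapped model's save()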
Example #18
Source File: model_ops.py From speech_separation with MIT License

def __init__(self, ser_model, gpus):
    pmodel = multi_gpu_model(ser_model, gpus)
    self.__dict__.update(pmodel.__dict__)
    self._smodel = ser_model