Python keras.callbacks.TerminateOnNaN() Examples
The following are 6 code examples of keras.callbacks.TerminateOnNaN(), drawn from open-source projects. The original project and source file for each example are noted above it. You may also want to check out the other available functions and classes of the keras.callbacks module.
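Before the project examples, here is a minimal sketch of typical usage. TerminateOnNaN takes no arguments; it is passed in the callbacks list of model.fit() and stops training as soon as a batch produces a NaN or infinite loss. The toy data and model below are illustrative assumptions, not taken from any of the projects.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TerminateOnNaN

# Toy regression data (illustrative only).
X = np.random.rand(256, 10)
y = np.random.rand(256, 1)

model = Sequential([Dense(32, input_dim=10, activation='relu'),
                    Dense(1)])
model.compile(loss='mse', optimizer='rmsprop')

# If any batch loss becomes NaN or inf, training stops immediately
# instead of continuing to update a corrupted model.
model.fit(X, y, batch_size=32, epochs=10, callbacks=[TerminateOnNaN()])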
Example #1
Source File: testing_utils.py From ntm_keras with BSD 3-Clause "New" or "Revised" License
def lengthy_test(model, testrange=[5, 10, 20, 40, 80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5",
                                  monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    return
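Here TerminateOnNaN() sits between TensorBoard and ModelCheckpoint in the callbacks list, so a long NTM training run is aborted as soon as the loss diverges instead of burning through the remaining epochs; accuracy is measured over several sequence lengths both before and after training.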
Example #2
Source File: dlfilter.py From rapidtide with Apache License 2.0
def train(self):
    self.intermediatemodelpath = os.path.join(self.modelname,
                                              'model_e{epoch:02d}_v{val_loss:.4f}.h5')
    if self.usetensorboard:
        tensorboard = TensorBoard(log_dir=self.intermediatemodelpath + "logs/{}".format(time()))
        self.model.fit(self.train_x, self.train_y, verbose=1, callbacks=[tensorboard])
    else:
        if self.num_pretrain_epochs > 0:
            print('pretraining model to reproduce input data')
            self.history = self.model.fit(
                self.train_y,
                self.train_y,
                batch_size=1024,
                epochs=self.num_pretrain_epochs,
                shuffle=True,
                verbose=1,
                callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
                validation_data=(self.val_y, self.val_y))
        self.history = self.model.fit(
            self.train_x,
            self.train_y,
            batch_size=1024,
            epochs=self.num_epochs,
            shuffle=True,
            verbose=1,
            callbacks=[TerminateOnNaN(), ModelCheckpoint(self.intermediatemodelpath)],
            validation_data=(self.val_x, self.val_y))
    self.savemodel(usehdf=True)
    self.savemodel(usehdf=False)
    self.trained = True
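TerminateOnNaN() guards both fit() calls here: the optional pretraining phase, which fits the targets onto themselves, and the main phase on the real inputs. Each phase also checkpoints intermediate models named by epoch and validation loss.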
Example #3
Source File: train.py From CSBDeep with BSD 3-Clause "New" or "Revised" License
def prepare_model(model, optimizer, loss, metrics=('mse', 'mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """TODO"""
    from keras.optimizers import Optimizer
    isinstance(optimizer, Optimizer) or _raise(ValueError())

    loss_standard = eval('loss_%s()' % loss)
    _metrics = [eval('loss_%s()' % m) for m in metrics]
    callbacks = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 + freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))

    if loss not in metrics:
        _metrics.append(loss_standard)

    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks
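Unlike the other examples, this helper does not call fit() itself: it compiles the model and returns the callbacks list it built, starting with TerminateOnNaN() and optionally including a ParameterDecayCallback, for the caller to pass to its own training loop.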
Example #4
Source File: test_callbacks.py From DeepLearning_Wavelet-LSTM with MIT License
def test_stop_training_csv(tmpdir):
    np.random.seed(1337)
    fp = str(tmpdir / 'test.csv')
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]
    model = Sequential()
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')

    def data_generator():
        i = 0
        max_batch_index = len(X_train) // batch_size
        tot = 0
        while 1:
            if tot > 3 * len(X_train):
                yield (np.ones([batch_size, input_dim]) * np.nan,
                       np.ones([batch_size, num_classes]) * np.nan)
            else:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            tot += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train) // batch_size,
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) > 1
    assert loss[-1] == np.inf or np.isnan(loss[-1])

    values = []
    with open(fp) as f:
        for x in reader(f):
            values.append(x)

    assert 'nan' in values[-1], 'The last epoch was not logged.'
    os.remove(fp)
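This unit test feeds training from a generator that switches to all-NaN batches after roughly three passes over the data, then asserts that TerminateOnNaN stopped training (the last recorded loss is inf or NaN) and that CSVLogger still wrote that final epoch to the CSV file.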
Example #5
Source File: train.py From alphagozero with MIT License
def train(model, game_model_name, epochs=None):
    if epochs is None:
        epochs = EPOCHS_PER_SAVE
    name = model.name
    base_name, index = name.split('_')
    new_name = "_".join([base_name, str(int(index) + 1)]) + ".h5"
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], new_name),
                              histogram_freq=conf['HISTOGRAM_FREQ'],
                              batch_size=BATCH_SIZE,
                              write_graph=False,
                              write_grads=False)
    nan_callback = TerminateOnNaN()

    directory = os.path.join("games", game_model_name)
    indices, weights = load_moves(directory)
    for epoch in tqdm.tqdm(range(epochs), desc="Epochs"):
        for worker in tqdm.tqdm(range(NUM_WORKERS), desc="Worker_batch"):
            chosen = choices(indices, weights, k=BATCH_SIZE)
            X = np.zeros((BATCH_SIZE, SIZE, SIZE, 17))
            policy_y = np.zeros((BATCH_SIZE, SIZE * SIZE + 1))
            value_y = np.zeros((BATCH_SIZE, 1))
            for j, (game_n, move) in enumerate(chosen):
                filename = os.path.join(directory, GAME_FILE % game_n)
                with h5py.File(filename, 'r') as f:
                    board = f['move_%s/board' % move][:]
                    policy = f['move_%s/policy_target' % move][:]
                    value_target = f['move_%s/value_target' % move][()]
                    X[j] = board
                    policy_y[j] = policy
                    value_y[j] = value_target

            fake_epoch = epoch * NUM_WORKERS + worker  # For tensorboard
            model.fit(X, [policy_y, value_y],
                      initial_epoch=fake_epoch,
                      epochs=fake_epoch + 1,
                      validation_split=VALIDATION_SPLIT,  # Needed for TensorBoard histograms and gradients
                      callbacks=[tf_callback, nan_callback],
                      verbose=0)
    model.name = new_name.split('.')[0]
    model.save(os.path.join(conf['MODEL_DIR'], new_name))
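In this AlphaGo Zero-style loop, the callback is created once as nan_callback and reused across every single-epoch fit() call, so a diverging worker batch stops its fit() immediately rather than continuing to train on corrupted gradients.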
Example #6
Source File: test_callbacks.py From DeepLearning_Wavelet-LSTM with MIT License
def test_TerminateOnNaN():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    cbks = [callbacks.TerminateOnNaN()]
    model = Sequential()
    initializer = initializers.Constant(value=1e5)
    for _ in range(5):
        model.add(Dense(num_hidden, input_dim=input_dim,
                        activation='relu', kernel_initializer=initializer))
    model.add(Dense(num_classes, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='rmsprop')

    # case 1 fit
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf

    # case 2 fit_generator
    def data_generator():
        max_batch_index = len(X_train) // batch_size
        i = 0
        while 1:
            yield (X_train[i * batch_size: (i + 1) * batch_size],
                   y_train[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    history = model.fit_generator(data_generator(),
                                  len(X_train),
                                  validation_data=(X_test, y_test),
                                  callbacks=cbks,
                                  epochs=20)
    loss = history.history['loss']
    assert len(loss) == 1
    assert loss[0] == np.inf or np.isnan(loss[0])
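This is the callback's own unit test: a Constant(1e5) kernel initializer makes the first batches overflow, and the assertions check that both fit() and fit_generator() record exactly one epoch whose loss is inf or NaN.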