Python keras.callbacks.TensorBoard() Examples

The following are 25 code examples of keras.callbacks.TensorBoard(), collected from open-source projects. The originating project, source file, and license are noted above each example.
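Before the project examples, here is a minimal sketch of the typical usage pattern, assuming standalone Keras with the TensorFlow backend; the model and data names in the commented fit() call are placeholders:

from keras.callbacks import TensorBoard

# Write TensorBoard event files under ./logs;
# view them with: tensorboard --logdir ./logs
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,   # set > 0 to also log weight histograms
                          write_graph=True,   # log the model graph
                          write_images=True)  # log weights as images

# model.fit(x_train, y_train, epochs=10, validation_data=(x_val, y_val),
#           callbacks=[tensorboard])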
Example #1
Source File: __init__.py    From ImageAI with MIT License
def _create_callbacks(self, saved_weights_name, model_to_save):

        checkpoint = CustomModelCheckpoint(
            model_to_save=model_to_save,
            filepath=saved_weights_name + 'ex-{epoch:03d}--loss-{loss:08.3f}.h5',
            monitor='loss',
            verbose=0,
            save_best_only=True,
            mode='min',
            period=1
        )
        reduce_on_plateau = ReduceLROnPlateau(
            monitor='loss',
            factor=0.1,
            patience=2,
            verbose=0,
            mode='min',
            epsilon=0.01,
            cooldown=0,
            min_lr=0
        )
        tensor_board = TensorBoard(
            log_dir=self.__logs_directory
        )
        return [checkpoint, reduce_on_plateau, tensor_board] 
Example #2
Source File: train.py    From 3D-Medical-Segmentation-GAN with Apache License 2.0
def train_seg_model(model, splitted_npy_dataset_path, test_path, epochs):
    test_XY = np.load(test_path+'/test.npy')
    X_test, Y_test = test_XY[0], test_XY[1]

    batch_dirs = listdir(splitted_npy_dataset_path)
    len_batch_dirs = len(batch_dirs)

    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints = []
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    for epoch in range(epochs):
        print('Epoch: {0}/{1}'.format(epoch+1, epochs))
        model.fit_generator(data_gen(splitted_npy_dataset_path), steps_per_epoch=int(len_batch_dirs / batch_size), epochs=1, callbacks=checkpoints)  # one Keras epoch per outer-loop iteration

        scores = model.evaluate(X_test, Y_test)
        dice_score = dice_coefficient(model.predict(X_test), Y_test)
        print('Test loss:', scores[0], '\nTest accuracy:', scores[1], '\nDice Coefficient Accuracy:', dice_score)
    return model

# Training GAN: 
Example #3
Source File: model.py    From PyMLProjects with MIT License
def init_logging_callbacks(self, log_dir=LOG_DIR_ROOT):
    self.checkpoint = ModelCheckpoint(
        filepath="%s/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" % log_dir,
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min')

    self.early_stopping = EarlyStopping(
        monitor='loss',
        min_delta=0,
        patience=PATIENCE,
        verbose=0,
        mode='auto')

    # timestamped run directory under log_dir
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    run_dir = "{}/run/{}".format(log_dir, now)
    self.tensorboard = TensorBoard(log_dir=run_dir,
                                   write_graph=True,
                                   write_images=True)

    self.callbacks = [self.early_stopping, self.tensorboard, self.checkpoint] 
Example #4
Source File: hous_price.py    From deep_learning with MIT License
def main():
    house_df = pd.read_csv('./data/housing.csv', sep='\s+', header=None)
    house_set = house_df.values
    # print(house_set)
    x = house_set[:, 0:13]
    y = house_set[:, 13]
    # print(y)

    # tbcallback=callbacks.TensorBoard(log_dir='./logs',histogram_freq=0, write_graph=True, write_images=True)
    estimators = []
    estimators.append(('mlp', KerasRegressor(build_fn=build_model, epochs=512, batch_size=32, verbose=1)))
    pipeline = Pipeline(estimators)
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)  # random_state only takes effect with shuffle=True

    # results = cross_val_score(estimator, x, y, cv=kfold)
    scores = cross_val_score(pipeline, x, y, cv=kfold)
    print('\n')
    print("Results: %.2f (%.2f) MSE" % (scores.mean(), scores.std())) 
Example #5
Source File: testing_utils.py    From ntm_keras with BSD 3-Clause "New" or "Revised" License
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # the graph eats a lot of space; enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Example #6
Source File: run_utils.py    From deep-mlsa with Apache License 2.0
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':  # note: matches the (misspelled) key used in this project's config files
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored 
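Note the histogram_freq=10 argument: the Keras TensorBoard callback computes weight histograms from validation data, so this setting only has an effect when the surrounding fit() call supplies a validation set.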
Example #7
Source File: train.py    From Dog-Cat-Classifier with Apache License 2.0
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation: generates extra training data on the fly.
    # It tends to improve accuracy, at the cost of longer training.

    # To train without augmentation, use this instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0] // 8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Example #8
Source File: trainer.py    From segmentation-unet-maskrcnn with MIT License
def train_net(weights_folder, logs_folder, progress_predict_dir, config, loss_mode):

    print("start train net")
    train_dataset = build_dataset(config.TRAIN_DIR)
    val_dataset = build_dataset(config.VAL_DIR)
    x_trn, y_trn = get_patches_dataset(train_dataset, config, shuffleOn=True, amt=config.AMT_TRAIN)
    x_val, y_val = get_patches_dataset(val_dataset, config, shuffleOn=False, amt=config.AMT_VAL)
    model = get_unet(config, loss_mode)
    os.makedirs(weights_folder, exist_ok=True)
    #model.load_weights('weights/unet_cl2_step0_e5_tr600_v600_jk0.6271')
    model_checkpoint = ModelCheckpoint(os.path.join(weights_folder,'unet_tmp.hdf5'), monitor='loss', save_best_only=True)
    tb_callback = TensorBoard(log_dir=logs_folder, histogram_freq=0, batch_size=config.BATCH_SIZE,
                              write_graph=True, write_grads=False, write_images=True,
                              embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
    start_time = time.time()
    for i in range(config.N_STEPS):
        print("Step i", i)
        model.fit(x_trn, y_trn, batch_size= config.BATCH_SIZE, epochs= config.EPOCS, verbose=1, shuffle=True,
                  callbacks=[model_checkpoint, tb_callback], validation_data=(x_val, y_val))

        print("---  Training for %s seconds ---" % (time.time() - start_time))
        score, trs = calc_jacc_img_msk(model, x_trn, y_trn, config.BATCH_SIZE, config.NUM_CLASSES)
        print('train jk', score)

        score, trs = calc_jacc_img_msk(model, x_val, y_val, config.BATCH_SIZE, config.NUM_CLASSES)
        print('val jk', score)
        score_str = '%.4f' % score
        model_name = 'unet_cl{0}_step{1}_e{2}_tr{3}_v{4}_jk{5}'.format(config.NUM_CLASSES, i, config.EPOCS,
                                                                       config.AMT_TRAIN, config.AMT_VAL,score_str)
        print("Weights: ", model_name)
        model.save_weights(os.path.join(weights_folder, model_name))

        #if (i % 10 == 0):
        check_predict_gold(model, model_name, progress_predict_dir, config, loss_mode)
        check_predict_small_test(model, model_name, progress_predict_dir, config, loss_mode)

        #Get ready for next step
        del x_trn
        del y_trn
        x_trn, y_trn = get_patches_dataset(train_dataset, config, shuffleOn=True, amt=config.AMT_TRAIN)
    return model 
Example #9
Source File: train.py    From YOLO-3D-Box with MIT License
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
        monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training. 
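This example relies on the dummy-loss trick common to Keras YOLO ports: the real loss is computed inside the network by the yolo_loss Lambda layer, so compile() is given an identity loss that just returns y_pred, and fit() receives np.zeros(len(image_data)) as throwaway targets.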
Example #10
Source File: main.py    From perfect_match with MIT License
def build_tensorboard(tmp_generator, tb_folder):
        for a_file in os.listdir(tb_folder):
            file_path = join(tb_folder, a_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(e, file=sys.stderr)

        tb = TensorBoard(tb_folder, write_graph=False, histogram_freq=1, write_grads=True, write_images=False)
        x, y = next(tmp_generator)

        tb.validation_data = x
        tb.validation_data[1] = np.expand_dims(tb.validation_data[1], axis=-1)
        if isinstance(y, list):
            num_targets = len(y)
            tb.validation_data += [y[0]] + y[1:]
        else:
            tb.validation_data += [y]
            num_targets = 1

        tb.validation_data += [np.ones(x[0].shape[0])] * num_targets + [0.0]
        return tb 
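Because this callback is driven by hand rather than through fit(), and histogram_freq=1 makes it compute histograms from validation data, the example assembles tb.validation_data itself in Keras's internal layout: input arrays, then target arrays, then one sample-weight array per target (the np.ones(...) entries), and finally 0.0 for the learning-phase flag.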
Example #11
Source File: model.py    From alphagozero with MIT License
def create_initial_model(name):
    full_filename = os.path.join(conf['MODEL_DIR'], name) + ".h5"
    if os.path.isfile(full_filename):
        model = load_model(full_filename, custom_objects={'loss': loss})
        return model

    model = build_model(name)

    # Save graph in tensorboard. This graph has the name scopes making it look
    # good in tensorboard, the loaded models will not have the scopes.
    tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], name),
            histogram_freq=0, batch_size=1, write_graph=True, write_grads=False)
    tf_callback.set_model(model)
    tf_callback.on_epoch_end(0)
    tf_callback.on_train_end(0)

    from self_play import self_play
    self_play(model, n_games=conf['N_GAMES'], mcts_simulations=conf['MCTS_SIMULATIONS'])
    model.save(full_filename)
    best_filename = os.path.join(conf['MODEL_DIR'], 'best_model.h5')
    model.save(best_filename)
    return model 
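No actual training happens around the callback here: with write_graph=True, set_model() is enough to write the model graph to the event file, and the on_epoch_end/on_train_end calls simply flush and close the writer.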
Example #12
Source File: train.py    From keras-ctpn with Apache License 2.0
def get_call_back():
    """
    定义call back
    :return:
    """
    checkpoint = ModelCheckpoint(filepath='/tmp/ctpn.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 period=5)

    # reduce the learning rate when the validation error has stopped improving
    lr_reducer = ReduceLROnPlateau(monitor='loss',
                                   factor=0.1,
                                   cooldown=0,
                                   patience=10,
                                   min_lr=1e-4)
    log = TensorBoard(log_dir='log')
    return [lr_reducer, checkpoint, log] 
Example #13
Source File: run.py    From Generative-Adversarial-Networks-Projects with MIT License
def write_log(callback, name, loss, batch_no):
    """
    Write training summary to TensorBoard
    """
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = loss
    summary_value.tag = name
    callback.writer.add_summary(summary, batch_no)
    callback.writer.flush() 
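Helpers like this are used with train_on_batch-style loops (typical for GANs), where fit() and its callback machinery never run. Below is a hypothetical wiring, assuming TensorFlow 1.x (where tf.Summary exists) and the Keras 2 TensorBoard callback, whose set_model() opens the writer that write_log() uses; the tiny model and random data are placeholders:

import numpy as np
from keras.callbacks import TensorBoard
from keras.layers import Dense
from keras.models import Sequential

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='adam', loss='mse')

tensorboard = TensorBoard(log_dir='logs/')
tensorboard.set_model(model)  # opens tensorboard.writer for manual logging

for step in range(100):
    x, y = np.random.rand(8, 4), np.random.rand(8, 1)
    loss = model.train_on_batch(x, y)
    write_log(tensorboard, 'train_loss', loss, step)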
Example #14
Source File: one_hot_model.py    From tying-wv-and-wc with MIT License
def _get_callbacks(self):
        callbacks = [self.model.optimizer.get_lr_scheduler()]
        folder_name = self.get_name()
        self_path = os.path.join(self.checkpoint_path, folder_name)
        if self.checkpoint_path:
            if not os.path.exists(self.checkpoint_path):
                print("Make folder to save checkpoint file to {}".format(self.checkpoint_path))
                os.mkdir(self.checkpoint_path)
            if not os.path.exists(self_path):
                os.mkdir(self_path)

            file_name = "_".join(["model_weights", "{epoch:02d}", "{val_acc:.2f}"]) + ".h5"
            save_callback = ModelCheckpoint(os.path.join(self_path, file_name), save_weights_only=True)
            callbacks += [save_callback]

            if self.tensor_board:
                board_path = os.path.join(self.checkpoint_path, "tensor_board")
                self_board_path = os.path.join(board_path, folder_name)
                if not os.path.exists(board_path):
                    print("Make folder to visualize on TensorBoard to {}".format(board_path))
                    os.mkdir(board_path)
                if not os.path.exists(self_board_path):
                    os.mkdir(self_board_path)
                callbacks += [TensorBoard(self_board_path)]
                print("invoke tensorboard at {}".format(board_path))

        return callbacks 
Example #15
Source File: model.py    From sfcn-opi with MIT License
def callback_preparation(model):
    """
    implement necessary callbacks into model.
    :return: list of callback.
    """
    timer = TimerCallback()
    timer.set_model(model)
    tensorboard_callback = TensorBoard(os.path.join(TENSORBOARD_DIR, 'base_tensorboard_logs'))
    checkpoint_callback = ModelCheckpoint(os.path.join(CHECKPOINT_DIR,'base_checkpoint',
                                                       'train_point.h5'), save_best_only=True, period=1)
    return [tensorboard_callback, checkpoint_callback, timer] 
Example #16
Source File: stage2.py    From Generative-Adversarial-Networks-Projects with MIT License
def write_log(callback, name, loss, batch_no):
    """
    Write training summary to TensorBoard
    """
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = loss
    summary_value.tag = name
    callback.writer.add_summary(summary, batch_no)
    callback.writer.flush() 
Example #17
Source File: stage1.py    From Generative-Adversarial-Networks-Projects with MIT License
def write_log(callback, name, loss, batch_no):
    """
    Write training summary to TensorBoard
    """
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = loss
    summary_value.tag = name
    callback.writer.add_summary(summary, batch_no)
    callback.writer.flush() 
Example #18
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_TensorBoard_convnet(tmpdir):
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=num_classes)
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)

    model = Sequential([
        Conv2D(filters=8, kernel_size=3,
               activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=2),
        Conv2D(filters=4, kernel_size=(3, 3),
               activation='relu', padding='same'),
        GlobalAveragePooling2D(),
        Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,
                                write_images=True, write_grads=True,
                                batch_size=16)
    cbks = [tsb]
    model.summary()
    history = model.fit(x_train, y_train, epochs=2, batch_size=16,
                        validation_data=(x_test, y_test),
                        callbacks=cbks,
                        verbose=0)
    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir() 
Example #19
Source File: mnist-keras.py    From nni with MIT License
def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    # nni
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
        validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %.4f', acc)  # %d would truncate the float accuracy to 0
    nni.report_final_result(acc) 
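Two callbacks cooperate here: nni's SendMetrics reports per-epoch results back to the NNI tuner, while the stock TensorBoard callback writes the same run to TENSORBOARD_DIR for visualization.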
Example #20
Source File: recommend_dnn.py    From deep_learning with MIT License
def build_SqModel(x_train,y_train):
    
    model = Sequential()
    model.add(Embedding(100000 + 1, 128, input_length=20))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='linear'))

    model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
    callTB = K.callbacks.TensorBoard(log_dir='./logs/dnn_merge-1')
    model.fit(x_train, y_train, epochs=16, batch_size=32, callbacks=[callTB], validation_split=0.2) 
Example #21
Source File: recommend_dnn.py    From deep_learning with MIT License
def build_model(x_train,y_train):
    """
    构建网络,训练模型
    """

    print("build network")
    usr_input = Input(shape=(3,))
    usr_x = Embedding(x_train[0].shape[0] + 1, 256, input_length=3)(usr_input)
    print("user_embedding_x:", usr_x.shape)
    usr_x = Flatten()(usr_x)
    usr_x = Dense(128, activation='relu')(usr_x)
    print("user_dense_x:", usr_x.shape)

    mov_input = Input(shape=(3,))
    mov_x = Embedding(x_train[0].shape[0] + 1, 256, input_length=3)(mov_input)
    print("movie_embedding_x:", mov_x.shape)
    mov_x = Flatten()(mov_x)
    mov_x = Dense(128, activation='relu')(mov_x)
    print("movie_dense_x:", mov_x.shape)

    concat_tensor = Concatenate()([usr_x, mov_x])
    print("concat_tensor:", concat_tensor.shape)
    x_tensor = Dense(64, activation='relu')(concat_tensor)
    x_tensor = Dropout(0.5)(x_tensor)
    x_tensor = Dense(32, activation='relu')(x_tensor)
    x_tensor = Dropout(0.3)(x_tensor)
    x_output = Dense(1, activation='linear')(x_tensor)

    print("Model:", usr_input.shape, mov_input.shape, "output_x:", x_output.shape)
    model = Model([usr_input, mov_input], x_output)
    optimizer = Adam(lr=0.002)
    model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])
    model_png = './models/dnn_recomm_model.png'
    # plot the network structure
    if not os.path.exists(model_png):
        utils.plot_model(model, to_file=model_png)
    callTB = callbacks.TensorBoard(log_dir='./logs/dnn_merge-1')
    print("training model")
    best_model = callbacks.ModelCheckpoint("./models/dnn_recommend_full.h5", monitor='val_loss', verbose=0, save_best_only=True)
    model.fit(x_train, y_train, epochs=64, batch_size=512, callbacks=[callTB, best_model], validation_split=0.2) 
Example #22
Source File: gen_wrods.py    From deep_learning with MIT License
def train_model():
    x, y, sortedcharset = load_data()

    model = build_model(sortedcharset)
    tbCallBack = callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
    best_model = callbacks.ModelCheckpoint("./models/gen_wrods.h5", monitor='val_loss', verbose=0, save_best_only=True)
    model.fit_generator(data_generator(x, y, nbatch_size), steps_per_epoch=512, epochs=16, verbose=1, callbacks=[tbCallBack, best_model],
                        validation_data=data_generator(x, y, nbatch_size), validation_steps=128) 
Example #23
Source File: textAnalysis.py    From deep_learning with MIT License
def train_model(input_dim,x_train, y_train, x_test, y_test):
    print(input_dim)    
    print('Building the model...')

    model = Sequential()

    model.add(Embedding(input_dim,EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))
    model.add(LSTM(256, activation="relu"))
    model.add(Dropout(0.3))
    model.add(Dense(512,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256,activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1,activation="sigmoid"))

    print('Compiling the model...')   # optimize with Adam
    optimizer = Adam(lr=0.0003)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    
    tbCallBack= callbacks.TensorBoard(log_dir='./logs',histogram_freq=0, write_graph=True, write_images=True)
    # best_model = ModelCheckpoint("./models/text_lstm.h5", monitor='val_loss', verbose=0, save_best_only=True)
    print("训练...")
    model.fit(x_train, y_train, batch_size=batch_size, epochs=3,verbose=1, validation_data=(x_test, y_test),callbacks=[tbCallBack])
    # 
    print("评估...")
    score, accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
    print('\nTest score:', score)
    print('Test accuracy:', accuracy)

    yaml_string = model.to_yaml()
    with open('./models/text_lstm.yaml', 'w') as outfile:
        outfile.write(yaml_string)
    model.save_weights('./models/text_lstm.h5') 
Example #24
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir() 
Example #25
Source File: train_task_devmap.py    From ncc with BSD 3-Clause "New" or "Revised" License
def train(self, epochs: int, batch_size: int, **train) -> None:
        from keras.callbacks import TensorBoard
        self.model.fit([train["aux_in"], train["sequences"]], [train["y_1hot"], train["y_1hot"]],
                       epochs=epochs, batch_size=batch_size, verbose=train["verbose"], shuffle=True,
                       callbacks=[TensorBoard(train['log_dir'])])