Python keras.backend.set_session() Examples
The following are 30 code examples of keras.backend.set_session(). You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the keras.backend module.
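Before the examples, a quick orientation: set_session() registers an existing TensorFlow session as the one Keras uses for all backend operations, which is why almost every example below pairs it with a tf.Session built from a tf.ConfigProto. The following is a minimal sketch of that common pattern, assuming the TF 1.x-style keras.backend API used throughout this page; the ConfigProto settings are illustrative placeholders, not recommendations.

import tensorflow as tf
import keras.backend as K

# Build a session with explicit (illustrative) GPU settings.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
sess = tf.Session(config=config)

# Register the session so Keras runs its backend ops in it.
K.set_session(sess)

# Any Keras model built after this point uses `sess` as its backend session.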
Example #1
Source File: ddcn.py From ddan with MIT License

def __init__(self, nfeatures=50, arch=[8, 'act'], mmd_layer_idx=[1], batch_size=16, supervised=False,
             confusion=0.0, confusion_incr=1e-3, confusion_max=1, val_data=None, validate_every=1,
             activations='relu', epochs=1000, optimizer=None, noise=0.0, droprate=0.0, verbose=True):

    self.batch_size = batch_size
    self.epochs = epochs
    self.validate_every = validate_every
    self.supervised = supervised
    self.verbose = verbose

    if val_data is None:
        self.validate_every = 0
    else:
        self.Xval = val_data[0]
        self.yval = val_data[1]

    self._build_model(nfeatures, arch, supervised, confusion, confusion_incr,
                      confusion_max, activations, noise, droprate, mmd_layer_idx, optimizer)

    self.sess = tf.Session()
    K.set_session(self.sess)
    self.sess.run(tf.global_variables_initializer())
Example #2
Source File: workers.py From dist-keras with GNU General Public License v3.0

def prepare_model(self):
    """Prepares the model for training."""
    # Set the Keras directory.
    set_keras_base_directory()
    if K.backend() == 'tensorflow':
        # set GPU option allow_growth to False for GPU-enabled tensorflow
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = False
        sess = tf.Session(config=config)
        K.set_session(sess)

    # Deserialize the Keras model.
    self.model = deserialize_keras_model(self.model)
    self.optimizer = deserialize(self.optimizer)
    # Compile the model with the specified loss and optimizer.
    self.model.compile(loss=self.loss, loss_weights=self.loss_weights,
                       optimizer=self.optimizer, metrics=self.metrics)
Example #3
Source File: actor_network.py From costar_plan with Apache License 2.0

def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE,
             convolutional=False, output_activation='sigmoid'):
    self.sess = sess
    self.BATCH_SIZE = BATCH_SIZE
    self.TAU = TAU
    self.LEARNING_RATE = LEARNING_RATE
    self.convolutional = convolutional
    self.output_activation = output_activation

    #K.set_session(sess)

    #Now create the model
    self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
    self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
    self.action_gradient = tf.placeholder(tf.float32, [None, action_size])
    self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
    grads = zip(self.params_grad, self.weights)
    self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
Example #4
Source File: critic_network.py From costar_plan with Apache License 2.0

def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE, convolutional=False):
    self.sess = sess
    self.BATCH_SIZE = BATCH_SIZE
    self.TAU = TAU
    self.LEARNING_RATE = LEARNING_RATE
    self.action_size = action_size
    self.convolutional = convolutional

    #K.set_session(sess)

    #Now create the model
    self.model, self.action, self.state = self.create_critic_network(state_size, action_size)
    self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)
    self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
Example #5
Source File: cpu.py From costar_plan with Apache License 2.0

def ConfigureGPU(args):
    cpu = True if 'cpu' in args and args['cpu'] else False
    fraction = 1
    if 'gpu_fraction' in args and args['gpu_fraction']:
        fraction = args['gpu_fraction']

    if fraction < 1. or cpu:
        import tensorflow as tf
        import keras.backend as K

        if cpu:
            config = tf.ConfigProto(device_count={'GPU': 0})
            sess = tf.Session(config=config)
        else:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=fraction)
            sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        K.set_session(sess)
Example #6
Source File: gpu_utils.py From talos with MIT License

def parallel_gpu_jobs(allow_growth=True, fraction=.5):
    '''Sets the max used memory as a fraction for the tensorflow backend

    allow_growth :: True or False

    fraction :: a float value (e.g. 0.5 means 4gb out of 8gb)
    '''
    import keras.backend as K
    import tensorflow as tf

    gpu_options = tf.GPUOptions(allow_growth=allow_growth,
                                per_process_gpu_memory_fraction=fraction)
    config = tf.ConfigProto(gpu_options=gpu_options)
    session = tf.Session(config=config)
    K.set_session(session)
Example #7
Source File: device_utils.py From keras-text-summarization with MIT License

def init_devices(device_type=None):
    if device_type is None:
        device_type = 'cpu'

    num_cores = 4

    if device_type == 'gpu':
        num_GPU = 1
        num_CPU = 1
    else:
        num_CPU = 1
        num_GPU = 0

    config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
                            inter_op_parallelism_threads=num_cores,
                            allow_soft_placement=True,
                            device_count={'CPU': num_CPU, 'GPU': num_GPU})
    session = tf.Session(config=config)
    K.set_session(session)
Example #8
Source File: exp_epic_kitchens.py From videograph with GNU General Public License v3.0

def __start_train_model_on_video_frames_videograph(n_epochs, n_timesteps, n_centroids, timestamp,
                                                   is_resume_training, start_epoch_num):
    # configure the gpu to be used by keras
    gpu_core_id = 3
    device_id = '/gpu:%d' % gpu_core_id

    # with graph.as_default():
    # with session.as_default():

    graph = tf.Graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config, graph=graph)
    K.set_session(sess)

    with sess:
        with tf.device(device_id):
            __train_model_on_video_frames_videograph(n_epochs, n_timesteps, n_centroids, timestamp,
                                                     is_resume_training, start_epoch_num)
Example #9
Source File: exp_epic_kitchens.py From videograph with GNU General Public License v3.0

def __start_train_model_on_video_frames_backbone_i3d_keras(n_epochs, starting_epoch_num, n_frames_per_video,
                                                           n_instances, instance_num):
    # configure the gpu to be used by keras
    gpu_core_id = instance_num - 1
    device_id = '/gpu:%d' % gpu_core_id

    assert instance_num in [1, 2, 3], 'Sorry, wrong instance number: %d' % (instance_num)

    graph = tf.Graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config, graph=graph)
    K.set_session(sess)

    with sess:
        with tf.device(device_id):
            __train_model_on_video_frames_backbone_i3d_keras(n_epochs, starting_epoch_num, n_frames_per_video,
                                                             n_instances, instance_num)
Example #10
Source File: breakout_a3c.py From reinforcement-learning with MIT License

def __init__(self, action_size):
    # environment settings
    self.state_size = (84, 84, 4)
    self.action_size = action_size

    self.discount_factor = 0.99
    self.no_op_steps = 30

    # optimizer parameters
    self.actor_lr = 2.5e-4
    self.critic_lr = 2.5e-4
    self.threads = 8

    # create model for actor and critic network
    self.actor, self.critic = self.build_model()

    # method for training actor and critic network
    self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]

    self.sess = tf.InteractiveSession()
    K.set_session(self.sess)
    self.sess.run(tf.global_variables_initializer())

    self.summary_placeholders, self.update_ops, self.summary_op = self.setup_summary()
    self.summary_writer = tf.summary.FileWriter('summary/breakout_a3c', self.sess.graph)
Example #11
Source File: tf_util.py From connect4-alpha-zero with MIT License

def set_session_config(per_process_gpu_memory_fraction=None, allow_growth=None):
    """
    :param allow_growth: When necessary, reserve memory
    :param float per_process_gpu_memory_fraction: specify GPU memory usage as 0 to 1
    :return:
    """
    import tensorflow as tf
    import keras.backend as K

    config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            per_process_gpu_memory_fraction=per_process_gpu_memory_fraction,
            allow_growth=allow_growth,
        )
    )
    sess = tf.Session(config=config)
    K.set_session(sess)
Example #12
Source File: util.py From openai_lab with MIT License

def configure_hardware(RAND_SEED):
    '''configure rand seed, GPU'''
    from keras import backend as K
    if K.backend() == 'tensorflow':
        K.tf.set_random_seed(RAND_SEED)
    else:
        K.theano.tensor.shared_randomstreams.RandomStreams(seed=RAND_SEED)

    if K.backend() != 'tensorflow':  # GPU config for tf only
        return

    process_num = PARALLEL_PROCESS_NUM if args.param_selection else 1
    tf = K.tf
    gpu_options = tf.GPUOptions(
        allow_growth=True,
        per_process_gpu_memory_fraction=1./float(process_num))
    config = tf.ConfigProto(
        gpu_options=gpu_options,
        allow_soft_placement=True)
    sess = tf.Session(config=config)
    K.set_session(sess)
    return sess
Example #13
Source File: CriticNetwork.py From a-deep-rl-approach-for-sdn-routing-optimization with MIT License

def __init__(self, sess, state_size, action_size, DDPG_config):
    self.HIDDEN1_UNITS = DDPG_config['HIDDEN1_UNITS']
    self.HIDDEN2_UNITS = DDPG_config['HIDDEN2_UNITS']

    self.sess = sess
    self.BATCH_SIZE = DDPG_config['BATCH_SIZE']
    self.TAU = DDPG_config['TAU']
    self.LEARNING_RATE = DDPG_config['LRC']
    self.action_size = action_size

    self.h_acti = relu
    if DDPG_config['HACTI'] == 'selu':
        self.h_acti = selu

    K.set_session(sess)

    #Now create the model
    self.model, self.action, self.state = self.create_critic_network(state_size, action_size)
    self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)
    self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
    self.sess.run(tf.global_variables_initializer())
Example #14
Source File: device_utils.py From keras-video-classifier with MIT License

def init_devices(device_type=None):
    if device_type is None:
        device_type = 'cpu'

    num_cores = 4

    if device_type == 'gpu':
        num_GPU = 1
        num_CPU = 1
    else:
        num_CPU = 1
        num_GPU = 0

    config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
                            inter_op_parallelism_threads=num_cores,
                            allow_soft_placement=True,
                            device_count={'CPU': num_CPU, 'GPU': num_GPU})
    session = tf.Session(config=config)
    K.set_session(session)
Example #15
Source File: model.py From mpi_learn with GNU General Public License v3.0

def build_model(self, local_session=True):
    import keras.backend as K
    if local_session:
        graph = K.tf.Graph()
        session = K.tf.Session(graph=graph, config=K.tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=False,
            gpu_options=K.tf.GPUOptions(
                per_process_gpu_memory_fraction=1./self.comm.Get_size())))
        with graph.as_default():
            with session.as_default():
                import keras.backend as K
                ret_model = self.build_model_aux()
                ret_model.session = session
                ret_model.graph = graph
                return ret_model
    else:
        K.set_session(K.tf.Session(config=K.tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=False,
            gpu_options=K.tf.GPUOptions(
                per_process_gpu_memory_fraction=1./self.comm.Get_size()))))
        return self.build_model_aux()
Example #16
Source File: nn_model.py From mercari-price-suggestion with MIT License

def __init__(self, train_df, word_count, batch_size, epochs):
    tf.set_random_seed(4)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=2, inter_op_parallelism_threads=8)
    backend.set_session(tf.Session(graph=tf.get_default_graph(), config=session_conf))

    self.batch_size = batch_size
    self.epochs = epochs

    self.max_name_seq = 10
    self.max_item_desc_seq = 75
    self.max_text = word_count + 1
    self.max_brand = np.max(train_df.brand_name.max()) + 1
    self.max_condition = np.max(train_df.item_condition_id.max()) + 1
    self.max_subcat0 = np.max(train_df.subcat_0.max()) + 1
    self.max_subcat1 = np.max(train_df.subcat_1.max()) + 1
    self.max_subcat2 = np.max(train_df.subcat_2.max()) + 1
Example #17
Source File: dann.py From ddan with MIT License

def __init__(self, nfeatures=50, arch_shared=[32, 'act'], arch_domain=[8, 'act'], arch_clf=[],
             batch_size=16, supervised=False, val_data=None, validate_every=1,
             activations='relu', epochs=1000, optimizer=None, noise=0.0, droprate=0.0,
             stop_at_target_loss=0.0):

    self.batch_size = batch_size
    self.epochs = epochs
    self.validate_every = validate_every
    self.stop_at_target_loss = stop_at_target_loss

    if val_data is None:
        validate_every = 0
    else:
        self.Xval = val_data[0]
        self.yval = val_data[1]

    self._build_model(nfeatures, arch_shared, arch_domain, arch_clf,
                      activations, supervised, noise, droprate, optimizer)

    self.sess = tf.Session()
    K.set_session(self.sess)
    self.sess.run(tf.global_variables_initializer())
Example #18
Source File: fttl.py From ddan with MIT License

def __init__(self, nfeatures=50, arch=[8, 'act', 8, 'act'], fine_tune_layers=[2, 3], batch_size=16,
             val_data=None, validate_every=1, activations='relu', epochs=5000, epochs_finetune=5000,
             optimizer=None, optimizer_finetune=None, noise=0.0, droprate=0.0, verbose=True,
             stop_at_target_loss=0):

    self.batch_size = batch_size
    self.validate_every = validate_every
    self.epochs = epochs
    self.epochs_finetune = epochs_finetune
    self.verbose = verbose
    self.stop_at_target_loss = stop_at_target_loss

    if val_data is None:
        self.validate_every = 0
    else:
        self.Xval = val_data[0]
        self.yval = val_data[1]

    self._build_model(nfeatures, arch, activations, noise, droprate,
                      optimizer, optimizer_finetune, fine_tune_layers)

    self.sess = tf.Session()
    K.set_session(self.sess)
    self.sess.run(tf.global_variables_initializer())
Example #19
Source File: deepcoral.py From ddan with MIT License

def __init__(self, nfeatures=50, arch=[8, 'act'], coral_layer_idx=[1], batch_size=16, supervised=False,
             confusion=1e4, confusion_incr=50, confusion_max=1e9, val_data=None, validate_every=1,
             activations='relu', epochs=1000, optimizer=None, noise=0.0, droprate=0.0, verbose=True):

    self.batch_size = batch_size
    self.epochs = epochs
    self.validate_every = validate_every
    self.supervised = supervised
    self.verbose = verbose

    if val_data is None:
        self.validate_every = 0
    else:
        self.Xval = val_data[0]
        self.yval = val_data[1]

    self._build_model(nfeatures, arch, supervised, confusion, confusion_incr,
                      confusion_max, activations, noise, droprate, coral_layer_idx, optimizer)

    self.sess = tf.Session()
    K.set_session(self.sess)
    self.sess.run(tf.global_variables_initializer())
Example #20
Source File: ds_breakfast.py From videograph with GNU General Public License v3.0

def __config_session_for_keras(gpu_core_id):
    import keras.backend as K
    import tensorflow as tf

    K.clear_session()
    config = tf.ConfigProto()
    config.gpu_options.visible_device_list = str(gpu_core_id)
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)

# endregion

# region 4.0 Pickle Features
Example #21
Source File: DHNE.py From OpenHINE with MIT License

def Process(dataset, dim_feature, embedding_size, hidden_size, learning_rate, alpha, batch_size,
            num_neg_samples, epochs_to_train, output_embfold, output_modelfold, prefix_path, reflect):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))

    h = hypergraph(dim_feature, embedding_size, hidden_size, learning_rate, alpha, batch_size,
                   num_neg_samples, epochs_to_train, output_embfold, output_modelfold, prefix_path, reflect)
    begin = time.time()
    h.train(dataset)
    end = time.time()
    print("time, ", end - begin)
    # h.save()
    h.save_embeddings(dataset)
    K.clear_session()
Example #22
Source File: run_bottleneck.py From CarND-Transfer-Learning-Lab with MIT License

def main(_):
    if FLAGS.dataset == 'cifar10':
        (X_train, y_train), (_, _) = cifar10.load_data()
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
    else:
        with open('data/train.p', mode='rb') as f:
            train = pickle.load(f)
        X_train, X_val, y_train, y_val = train_test_split(train['features'], train['labels'],
                                                          test_size=0.33, random_state=0)

    train_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_train')
    validation_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_validation')

    print("Resizing to", (w, h, ch))
    print("Saving to ...")
    print(train_output_file)
    print(validation_output_file)

    with tf.Session() as sess:
        K.set_session(sess)
        K.set_learning_phase(1)

        model = create_model()

        print('Bottleneck training')
        train_gen = gen(sess, X_train, y_train, batch_size)
        bottleneck_features_train = model.predict_generator(train_gen(), X_train.shape[0])
        data = {'features': bottleneck_features_train, 'labels': y_train}
        pickle.dump(data, open(train_output_file, 'wb'))

        print('Bottleneck validation')
        val_gen = gen(sess, X_val, y_val, batch_size)
        bottleneck_features_validation = model.predict_generator(val_gen(), X_val.shape[0])
        data = {'features': bottleneck_features_validation, 'labels': y_val}
        pickle.dump(data, open(validation_output_file, 'wb'))
Example #23
Source File: spline.py From audio-super-res with MIT License

def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
        x = X

    return x
Example #24
Source File: model2.py From audio-super-res with MIT License

def __init__(self, r=2, opt_params=default_opt):
    gpu_options = tf.GPUOptions(allow_growth=True)
    self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
    K.set_session(self.sess)  # pass keras the session

    # save params
    self.opt_params = opt_params
    self.layers = opt_params['layers']
Example #25
Source File: deep_mil.py From TensorFlow-MIL with MIT License

def main():
    # Parse Arguments
    parser = argparse.ArgumentParser(description='Deep MIL Arguments')
    parser.add_argument('-e', '--NUM_EPOCHS', default=1, type=int)  # Number of epochs for which to train the model
    parser.add_argument('-r', '--SEED', default=123)  # specify the seed
    parser.add_argument('-b', '--BATCH_SIZE', default=128, type=int)  # batch size for training
    parser.add_argument('-s', '--SAVE_DIRECTORY', default='conv_mil/', type=str)  # Where to save model
    parser.add_argument('-m', '--MODEL_NAME', default='model.h5', type=str)  # To save individual run
    parser.add_argument('-t', '--TRAIN', default=1, type=int)  # whether to train (1) or load model (0)
    parser.add_argument('-v', '--VISUALIZE', default=1, type=int)  # whether to visualize the output
    flags = vars(parser.parse_args())

    # Build MNIST dataset
    dataset = Mnist()

    # Make save directory if it doesn't exist
    if not os.path.exists(flags['SAVE_DIRECTORY']):
        os.makedirs(flags['SAVE_DIRECTORY'])
    filepath = os.path.join(flags['SAVE_DIRECTORY'], flags['MODEL_NAME'])

    with tf.Graph().as_default():
        with tf.Session() as sess:
            K.set_session(sess)
            # Train or load model
            if flags['TRAIN']:
                model = train(epochs=flags['NUM_EPOCHS'], seed=flags['SEED'],
                              batch_size=flags['BATCH_SIZE'], dataset=dataset)
                model.save(filepath)
            else:
                model = load_model(filepath)
            # Visualize with tf_cnnvis
            visualize(sess, model, dataset)
Example #26
Source File: utils.py From convnet-study with MIT License

def config_gpu(gpu, allow_growth):
    # Choosing gpu
    if gpu == '-1':
        config = tf.ConfigProto(device_count={'GPU': 0})
    else:
        if gpu == 'all' or gpu == '':
            gpu = ''
        config = tf.ConfigProto()
        config.gpu_options.visible_device_list = gpu

    if allow_growth == True:
        config.gpu_options.allow_growth = True

    session = tf.Session(config=config)
    K.set_session(session)
Example #27
Source File: train_mrcnn.py From maskrcnn with MIT License

def set_debugger_session():
    sess = K.get_session()
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter('name_filter', name_filter)
    K.set_session(sess)
Example #28
Source File: ActorNetwork.py From a-deep-rl-approach-for-sdn-routing-optimization with MIT License

def __init__(self, sess, state_size, action_size, DDPG_config):
    self.HIDDEN1_UNITS = DDPG_config['HIDDEN1_UNITS']
    self.HIDDEN2_UNITS = DDPG_config['HIDDEN2_UNITS']

    self.sess = sess
    self.BATCH_SIZE = DDPG_config['BATCH_SIZE']
    self.TAU = DDPG_config['TAU']
    self.LEARNING_RATE = DDPG_config['LRA']
    self.ACTUM = DDPG_config['ACTUM']

    if self.ACTUM == 'NEW':
        self.acti = 'sigmoid'
    elif self.ACTUM == 'DELTA':
        self.acti = 'tanh'

    self.h_acti = relu
    if DDPG_config['HACTI'] == 'selu':
        self.h_acti = selu

    K.set_session(sess)

    #Now create the model
    self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
    self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
    self.action_gradient = tf.placeholder(tf.float32, [None, action_size])
    self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
    grads = zip(self.params_grad, self.weights)
    self.optimize = tf.train.AdamOptimizer(self.LEARNING_RATE).apply_gradients(grads)
    self.sess.run(tf.global_variables_initializer())
Example #29
Source File: libraries.py From vergeml with MIT License

def setup(env):
    stderr = sys.stderr
    sys.stderr = open(os.devnull, "w")  # pylint: disable=W0612
    try:
        import keras
    except Exception as e:
        raise e
    finally:
        sys.stderr = stderr

    from keras import backend as K
    if K.backend() == 'tensorflow':
        TensorFlowLibrary.setup(env)
        K.set_session(TensorFlowLibrary.create_session(env))
Example #30
Source File: triplet_loss_test.py From bootcamp with Apache License 2.0

def test_1(self):
    # ANCHOR 1 (512,), index = 0
    # ANCHOR 2 (512,), index = 1
    # ANCHOR 3 (512,), index = 2
    # ANCHOR 4 (512,), index = 3
    # ANCHOR 5 (512,), index = 4
    # ANCHOR 6 (512,), index = 5
    # POS EX 1 (512,), index = 6
    # POS EX 2 (512,), index = 7
    # POS EX 3 (512,), index = 8
    # POS EX 4 (512,), index = 9
    # POS EX 5 (512,), index = 10
    # POS EX 6 (512,), index = 11
    # NEG EX 1 (512,), index = 12
    # NEG EX 2 (512,), index = 13
    # NEG EX 3 (512,), index = 14
    # NEG EX 4 (512,), index = 15
    # NEG EX 5 (512,), index = 16
    # NEG EX 6 (512,), index = 17
    x2 = 1
    sess = tf.InteractiveSession()
    K.set_session(sess)

    highest_loss = deep_speaker_loss(tf.constant(opposite_positive_equal_negative_batch()), x2).eval()
    high_loss = deep_speaker_loss(tf.constant(random_positive_random_negative_batch()), x2).eval()
    low_loss = deep_speaker_loss(tf.constant(equal_positive_random_negative_batch()), x2).eval()
    lowest_loss = deep_speaker_loss(tf.constant(equal_positive_opposite_negative_batch()), x2).eval()

    self.assertTrue(highest_loss >= high_loss >= low_loss >= lowest_loss)